// soju/upstream.go

package soju
import (
"context"
"crypto"
"crypto/sha256"
"crypto/sha512"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"io"
"net"
"strconv"
"strings"
"time"
"github.com/emersion/go-sasl"
"gopkg.in/irc.v4"
"git.sr.ht/~emersion/soju/database"
"git.sr.ht/~emersion/soju/xirc"
)
// permanentUpstreamCaps is the static list of upstream capabilities always
// requested when supported.
var permanentUpstreamCaps = map[string]bool{
"account-notify": true,
"account-tag": true,
"away-notify": true,
"batch": true,
"chghost": true,
"extended-join": true,
"extended-monitor": true,
"invite-notify": true,
"labeled-response": true,
"message-tags": true,
"multi-prefix": true,
"sasl": true,
"server-time": true,
"setname": true,
"draft/account-registration": true,
"draft/extended-monitor": true,
}
// storableMessageTags is the static list of message tags that will cause
// a TAGMSG to be stored.
var storableMessageTags = map[string]bool{
"+react": true,
}
type registrationError struct {
*irc.Message
}
func (err registrationError) Error() string {
return fmt.Sprintf("registration error (%v): %v", err.Command, err.Reason())
}
func (err registrationError) Reason() string {
if len(err.Params) > 0 {
return err.Params[len(err.Params)-1]
}
return err.Command
}
func (err registrationError) Temporary() bool {
// Only return false if we're 100% sure that fixing the error requires a
// network configuration change
switch err.Command {
case irc.ERR_PASSWDMISMATCH, irc.ERR_ERRONEUSNICKNAME:
return false
case "FAIL":
return err.Params[1] != "ACCOUNT_REQUIRED"
default:
return true
}
}
type upstreamChannel struct {
Name string
conn *upstreamConn
Topic string
TopicWho *irc.Prefix
TopicTime time.Time
Status xirc.ChannelStatus
modes channelModes
creationTime string
Members xirc.CaseMappingMap[*xirc.MembershipSet]
complete bool
detachTimer *time.Timer
}
func (uc *upstreamChannel) updateAutoDetach(dur time.Duration) {
if uc.detachTimer != nil {
uc.detachTimer.Stop()
uc.detachTimer = nil
}
if dur == 0 {
return
}
uc.detachTimer = time.AfterFunc(dur, func() {
uc.conn.network.user.events <- eventChannelDetach{
uc: uc.conn,
name: uc.Name,
}
})
}
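
// upstreamBatch is a BATCH currently open on the upstream connection;
// messages tagged with its reference inherit its label.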
type upstreamBatch struct {
Type string
Params []string
Outer *upstreamBatch // if non-nil, this batch is nested inside Outer
Label string
}
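
// upstreamUser is a cache entry for a user known to this upstream connection
// (ourselves, monitored users, and users sharing a channel with us). It holds
// the fields needed to answer downstream WHO/WHOX queries without forwarding
// them to the upstream.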
type upstreamUser struct {
Nickname string
Username string
Hostname string
Server string
Flags string
Account string
Realname string
}
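
// hasWHOXFields reports whether uu has a cached value for every WHOX field
// letter in fields.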
func (uu *upstreamUser) hasWHOXFields(fields string) bool {
for i := 0; i < len(fields); i++ {
ok := false
switch fields[i] {
case 'n':
ok = uu.Nickname != ""
case 'u':
ok = uu.Username != ""
case 'h':
ok = uu.Hostname != ""
case 's':
ok = uu.Server != ""
case 'f':
ok = uu.Flags != ""
case 'a':
ok = uu.Account != ""
case 'r':
ok = uu.Realname != ""
case 't', 'c', 'i', 'd', 'l', 'o':
// we return static values for those fields, so they are always available
ok = true
}
if !ok {
return false
}
}
return true
}
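
// updateFrom merges the non-empty fields of update into uu.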
func (uu *upstreamUser) updateFrom(update *upstreamUser) {
if update.Nickname != "" {
uu.Nickname = update.Nickname
}
if update.Username != "" {
uu.Username = update.Username
}
if update.Hostname != "" {
uu.Hostname = update.Hostname
}
if update.Server != "" {
uu.Server = update.Server
}
if update.Flags != "" {
uu.Flags = update.Flags
}
if update.Account != "" {
uu.Account = update.Account
}
if update.Realname != "" {
uu.Realname = update.Realname
}
}
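
// pendingUpstreamCommand is a command issued by a downstream and relayed to
// the upstream. sentAt is zero until the command has actually been sent.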
type pendingUpstreamCommand struct {
downstreamID uint64
msg *irc.Message
sentAt time.Time
}
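
// upstreamConn is a connection to an upstream IRC server, on behalf of a
// single network of a single user.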
type upstreamConn struct {
conn
network *network
user *user
serverPrefix *irc.Prefix
serverName string
availableUserModes string
availableChannelModes map[byte]channelModeType
availableChannelTypes string
availableMemberships []xirc.Membership
isupport map[string]*string
registered bool
nick string
username string
realname string
hostname string
modes userModes
channels xirc.CaseMappingMap[*upstreamChannel]
users xirc.CaseMappingMap[*upstreamUser]
caps xirc.CapRegistry
batches map[string]upstreamBatch
away bool
account string
nextLabelID uint64
monitored xirc.CaseMappingMap[bool]
saslClient sasl.Client
saslStarted bool
// Queue of commands in progress, indexed by type. The first entry has been
// sent to the server and is awaiting reply. The following entries have not
// been sent yet.
pendingCmds map[string][]pendingUpstreamCommand
pendingRegainNick string
regainNickTimer *time.Timer
regainNickBackoff *backoffer
gotMotd bool
hasDesiredNick bool
}
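
// connectToUpstream dials the network's upstream server according to its URL
// scheme ("ircs", "irc+insecure", "irc+unix"/"unix") and returns a new,
// not-yet-registered upstreamConn.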
func connectToUpstream(ctx context.Context, network *network) (*upstreamConn, error) {
logger := &prefixLogger{network.user.logger, fmt.Sprintf("upstream %q: ", network.GetName())}
ctx, cancel := context.WithTimeout(ctx, connectTimeout)
defer cancel()
var dialer net.Dialer
u, err := network.URL()
if err != nil {
return nil, err
}
var netConn net.Conn
switch u.Scheme {
case "ircs":
addr := u.Host
host, _, err := net.SplitHostPort(u.Host)
if err != nil {
host = u.Host
addr = u.Host + ":6697"
}
dialer.LocalAddr, err = network.user.localTCPAddrForHost(ctx, host)
if err != nil {
return nil, fmt.Errorf("failed to pick local IP for remote host %q: %v", host, err)
}
logger.Printf("connecting to TLS server at address %q", addr)
tlsConfig := &tls.Config{ServerName: host, NextProtos: []string{"irc"}}
if network.SASL.Mechanism == "EXTERNAL" {
if network.SASL.External.CertBlob == nil {
return nil, fmt.Errorf("missing certificate for authentication")
}
if network.SASL.External.PrivKeyBlob == nil {
return nil, fmt.Errorf("missing private key for authentication")
}
key, err := x509.ParsePKCS8PrivateKey(network.SASL.External.PrivKeyBlob)
if err != nil {
return nil, fmt.Errorf("failed to parse private key: %v", err)
}
tlsConfig.Certificates = []tls.Certificate{
{
Certificate: [][]byte{network.SASL.External.CertBlob},
PrivateKey: key.(crypto.PrivateKey),
},
}
logger.Printf("using TLS client certificate %x", sha256.Sum256(network.SASL.External.CertBlob))
}
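// The stored fingerprint is expected to be of the form "<algo>:<hex digest>",
// e.g. "sha-256:ab12..."; verification succeeds if any certificate presented
// by the server matches it.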
if network.CertFP != "" {
tlsConfig.InsecureSkipVerify = true
tlsConfig.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
if len(rawCerts) == 0 {
return fmt.Errorf("the server didn't present any TLS certificate")
}
parts := strings.SplitN(network.CertFP, ":", 2)
algo, localCertFP := parts[0], parts[1]
for _, rawCert := range rawCerts {
var remoteCertFP string
switch algo {
case "sha-512":
sum := sha512.Sum512(rawCert)
remoteCertFP = hex.EncodeToString(sum[:])
case "sha-256":
sum := sha256.Sum256(rawCert)
remoteCertFP = hex.EncodeToString(sum[:])
}
if remoteCertFP == localCertFP {
return nil // fingerprints match
}
}
// Fingerprints don't match, let's give the user a fingerprint
// they can use to connect
sum := sha512.Sum512(rawCerts[0])
remoteCertFP := hex.EncodeToString(sum[:])
return fmt.Errorf("the configured TLS certificate fingerprint doesn't match the server's - %s", remoteCertFP)
}
}
netConn, err = dialer.DialContext(ctx, "tcp", addr)
if err != nil {
return nil, fmt.Errorf("failed to dial %q: %v", addr, err)
}
// Don't do the TLS handshake immediately, because we need to register
// the new connection with identd ASAP. See:
// https://todo.sr.ht/~emersion/soju/69#event-41859
netConn = tls.Client(netConn, tlsConfig)
case "irc+insecure":
addr := u.Host
host, _, err := net.SplitHostPort(addr)
if err != nil {
host = u.Host
addr = u.Host + ":6667"
}
dialer.LocalAddr, err = network.user.localTCPAddrForHost(ctx, host)
if err != nil {
return nil, fmt.Errorf("failed to pick local IP for remote host %q: %v", host, err)
}
logger.Printf("connecting to plain-text server at address %q", addr)
netConn, err = dialer.DialContext(ctx, "tcp", addr)
if err != nil {
return nil, fmt.Errorf("failed to dial %q: %v", addr, err)
}
case "irc+unix", "unix":
logger.Printf("connecting to Unix socket at path %q", u.Path)
netConn, err = dialer.DialContext(ctx, "unix", u.Path)
if err != nil {
return nil, fmt.Errorf("failed to connect to Unix socket %q: %v", u.Path, err)
}
default:
return nil, fmt.Errorf("failed to dial %q: unknown scheme: %v", network.Addr, u.Scheme)
}
options := connOptions{
Logger: logger,
RateLimitDelay: upstreamMessageDelay,
RateLimitBurst: upstreamMessageBurst,
}
cm := stdCaseMapping
uc := &upstreamConn{
conn: *newConn(network.user.srv, newNetIRCConn(netConn), &options),
network: network,
user: network.user,
channels: xirc.NewCaseMappingMap[*upstreamChannel](cm),
users: xirc.NewCaseMappingMap[*upstreamUser](cm),
caps: xirc.NewCapRegistry(),
batches: make(map[string]upstreamBatch),
serverPrefix: &irc.Prefix{Name: "*"},
availableChannelTypes: stdChannelTypes,
availableChannelModes: stdChannelModes,
availableMemberships: stdMemberships,
isupport: make(map[string]*string),
pendingCmds: make(map[string][]pendingUpstreamCommand),
monitored: xirc.NewCaseMappingMap[bool](cm),
hasDesiredNick: true,
}
return uc, nil
}
func (uc *upstreamConn) forEachDownstream(f func(*downstreamConn)) {
uc.network.forEachDownstream(f)
}
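
// forEachDownstreamByID calls f for the downstream connection with the given
// ID, or for every downstream if id is 0.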
func (uc *upstreamConn) forEachDownstreamByID(id uint64, f func(*downstreamConn)) {
uc.forEachDownstream(func(dc *downstreamConn) {
if id != 0 && id != dc.id {
return
}
f(dc)
})
}
func (uc *upstreamConn) downstreamByID(id uint64) *downstreamConn {
for _, dc := range uc.user.downstreamConns {
if dc.id == id {
return dc
}
}
return nil
}
func (uc *upstreamConn) getChannel(name string) (*upstreamChannel, error) {
ch := uc.channels.Get(name)
if ch == nil {
return nil, fmt.Errorf("unknown channel %q", name)
}
return ch, nil
}
func (uc *upstreamConn) isChannel(entity string) bool {
return len(entity) > 0 && strings.ContainsRune(uc.availableChannelTypes, rune(entity[0]))
}
func (uc *upstreamConn) isOurNick(nick string) bool {
return uc.network.equalCasemap(uc.nick, nick)
}
func (uc *upstreamConn) forwardMessage(ctx context.Context, msg *irc.Message) {
uc.forEachDownstream(func(dc *downstreamConn) {
dc.SendMessage(ctx, msg)
})
}
func (uc *upstreamConn) forwardMsgByID(ctx context.Context, id uint64, msg *irc.Message) {
uc.forEachDownstreamByID(id, func(dc *downstreamConn) {
dc.SendMessage(ctx, msg)
})
}
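
// abortPendingCommands sends an "aborted" terminal reply to the requesting
// downstream for every pending command, then clears the queues.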
func (uc *upstreamConn) abortPendingCommands() {
ctx := context.TODO()
for _, l := range uc.pendingCmds {
for _, pendingCmd := range l {
dc := uc.downstreamByID(pendingCmd.downstreamID)
if dc == nil {
continue
}
switch pendingCmd.msg.Command {
case "LIST":
dc.SendMessage(ctx, &irc.Message{
Prefix: dc.srv.prefix(),
Command: irc.RPL_LISTEND,
Params: []string{dc.nick, "Command aborted"},
})
case "WHO":
mask := "*"
if len(pendingCmd.msg.Params) > 0 {
mask = pendingCmd.msg.Params[0]
}
dc.SendMessage(ctx, &irc.Message{
Prefix: dc.srv.prefix(),
Command: irc.RPL_ENDOFWHO,
Params: []string{dc.nick, mask, "Command aborted"},
})
case "WHOIS":
nick := pendingCmd.msg.Params[len(pendingCmd.msg.Params)-1]
dc.SendMessage(ctx, &irc.Message{
Prefix: dc.srv.prefix(),
Command: irc.RPL_ENDOFWHOIS,
Params: []string{dc.nick, nick, "Command aborted"},
})
case "AUTHENTICATE":
dc.endSASL(ctx, &irc.Message{
Prefix: dc.srv.prefix(),
Command: irc.ERR_SASLABORTED,
Params: []string{dc.nick, "SASL authentication aborted"},
})
case "REGISTER", "VERIFY":
dc.SendMessage(ctx, &irc.Message{
Prefix: dc.srv.prefix(),
Command: "FAIL",
Params: []string{pendingCmd.msg.Command, "TEMPORARILY_UNAVAILABLE", pendingCmd.msg.Params[0], "Command aborted"},
})
default:
panic(fmt.Errorf("Unsupported pending command %q", pendingCmd.msg.Command))
}
}
}
uc.pendingCmds = make(map[string][]pendingUpstreamCommand)
}
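
// sendNextPendingCommand sends the head of the pending queue for cmd, if any,
// and records when it was sent.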
func (uc *upstreamConn) sendNextPendingCommand(cmd string) {
if len(uc.pendingCmds[cmd]) == 0 {
return
}
pendingCmd := &uc.pendingCmds[cmd][0]
uc.SendMessageLabeled(context.TODO(), pendingCmd.downstreamID, pendingCmd.msg)
pendingCmd.sentAt = time.Now()
}
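
// enqueueCommand appends msg to the pending queue for its command type on
// behalf of dc, and sends it immediately if no other command of that type is
// in flight.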
func (uc *upstreamConn) enqueueCommand(dc *downstreamConn, msg *irc.Message) {
switch msg.Command {
case "LIST", "WHO", "WHOIS", "AUTHENTICATE", "REGISTER", "VERIFY":
// Supported
default:
panic(fmt.Errorf("Unsupported pending command %q", msg.Command))
}
uc.pendingCmds[msg.Command] = append(uc.pendingCmds[msg.Command], pendingUpstreamCommand{
downstreamID: dc.id,
msg: msg,
})
// If we didn't get a reply after a while, just give up
// TODO: consider sending an abort reply to downstream
if t := uc.pendingCmds[msg.Command][0].sentAt; !t.IsZero() && time.Since(t) > 30*time.Second {
// Drop the stale head, mirroring the shift-and-truncate in dequeueCommand.
copy(uc.pendingCmds[msg.Command], uc.pendingCmds[msg.Command][1:])
uc.pendingCmds[msg.Command] = uc.pendingCmds[msg.Command][:len(uc.pendingCmds[msg.Command])-1]
}
if len(uc.pendingCmds[msg.Command]) == 1 {
uc.sendNextPendingCommand(msg.Command)
}
}
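
// currentPendingCommand returns the in-flight command of the given type and
// the downstream that issued it, or nil if the queue is empty.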
func (uc *upstreamConn) currentPendingCommand(cmd string) (*downstreamConn, *irc.Message) {
if len(uc.pendingCmds[cmd]) == 0 {
return nil, nil
}
pendingCmd := uc.pendingCmds[cmd][0]
return uc.downstreamByID(pendingCmd.downstreamID), pendingCmd.msg
}
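
// dequeueCommand removes the in-flight command of the given type and sends
// the next queued one, if any.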
func (uc *upstreamConn) dequeueCommand(cmd string) (*downstreamConn, *irc.Message) {
dc, msg := uc.currentPendingCommand(cmd)
if len(uc.pendingCmds[cmd]) > 0 {
copy(uc.pendingCmds[cmd], uc.pendingCmds[cmd][1:])
uc.pendingCmds[cmd] = uc.pendingCmds[cmd][:len(uc.pendingCmds[cmd])-1]
}
uc.sendNextPendingCommand(cmd)
return dc, msg
}
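
// cancelPendingCommandsByDownstreamID drops queued (not yet sent) commands
// issued by the given downstream; the in-flight command, if any, is left
// untouched.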
func (uc *upstreamConn) cancelPendingCommandsByDownstreamID(downstreamID uint64) {
for cmd := range uc.pendingCmds {
// We can't cancel the currently running command stored in
// uc.pendingCmds[cmd][0]
for i := len(uc.pendingCmds[cmd]) - 1; i >= 1; i-- {
if uc.pendingCmds[cmd][i].downstreamID == downstreamID {
uc.pendingCmds[cmd] = append(uc.pendingCmds[cmd][:i], uc.pendingCmds[cmd][i+1:]...)
}
}
}
}
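
// parseMembershipPrefix strips the leading membership prefixes (e.g. "@+")
// from a NAMES-style entry and returns them together with the remaining
// nickname.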
func (uc *upstreamConn) parseMembershipPrefix(s string) (ms xirc.MembershipSet, nick string) {
var memberships xirc.MembershipSet
i := 0
for _, m := range uc.availableMemberships {
if i >= len(s) {
break
}
if s[i] == m.Prefix {
memberships = append(memberships, m)
i++
}
}
return memberships, s[i:]
}
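
// handleMessage handles a single message received from the upstream server.
// Labels of the form "sd-<downstreamID>-<offset>" identify the downstream
// connection that triggered the message, so replies can be routed back to it.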
func (uc *upstreamConn) handleMessage(ctx context.Context, msg *irc.Message) error {
var label string
if l, ok := msg.Tags["label"]; ok {
label = l
delete(msg.Tags, "label")
}
var msgBatch *upstreamBatch
if batchName, ok := msg.Tags["batch"]; ok {
b, ok := uc.batches[batchName]
if !ok {
return fmt.Errorf("unexpected batch reference: batch was not defined: %q", batchName)
}
msgBatch = &b
if label == "" {
label = msgBatch.Label
}
delete(msg.Tags, "batch")
}
var downstreamID uint64
if label != "" {
var labelOffset uint64
n, err := fmt.Sscanf(label, "sd-%d-%d", &downstreamID, &labelOffset)
if err == nil && n < 2 {
err = errors.New("not enough arguments")
}
if err != nil {
return fmt.Errorf("unexpected message label: invalid downstream reference for label %q: %v", label, err)
}
}
if msg.Prefix == nil {
msg.Prefix = uc.serverPrefix
}
if !isNumeric(msg.Command) {
t, err := time.Parse(xirc.ServerTimeLayout, string(msg.Tags["time"]))
if err != nil {
t = time.Now()
}
msg.Tags["time"] = uc.user.FormatServerTime(t)
}
switch msg.Command {
case "PING":
uc.SendMessage(ctx, &irc.Message{
Command: "PONG",
Params: msg.Params,
})
return nil
case "NOTICE", "PRIVMSG", "TAGMSG":
var target, text string
if msg.Command != "TAGMSG" {
if err := parseMessageParams(msg, &target, &text); err != nil {
return err
}
} else {
if err := parseMessageParams(msg, &target); err != nil {
return err
}
}
if uc.network.equalCasemap(msg.Prefix.Name, serviceNick) {
uc.logger.Printf("skipping %v from soju's service: %v", msg.Command, msg)
break
}
if uc.network.equalCasemap(target, serviceNick) {
uc.logger.Printf("skipping %v to soju's service: %v", msg.Command, msg)
break
}
if !uc.registered || uc.network.equalCasemap(msg.Prefix.Name, uc.serverPrefix.Name) || target == "*" || strings.HasPrefix(target, "$") {
// This is a server message
uc.produce("", msg, 0)
break
}
directMessage := uc.isOurNick(target)
bufferName := target
if directMessage {
bufferName = msg.Prefix.Name
}
if t, ok := msg.Tags["+draft/channel-context"]; ok {
ch := uc.channels.Get(string(t))
if ch != nil && ch.Members.Has(msg.Prefix.Name) {
bufferName = ch.Name
directMessage = false
}
}
self := uc.isOurNick(msg.Prefix.Name)
ch := uc.network.channels.Get(bufferName)
highlight := false
if ch != nil && msg.Command != "TAGMSG" && !self {
if ch.Detached {
uc.handleDetachedMessage(ctx, ch, msg)
}
highlight = uc.network.isHighlight(msg)
if ch.DetachOn == database.FilterMessage || ch.DetachOn == database.FilterDefault || (ch.DetachOn == database.FilterHighlight && highlight) {
uc.updateChannelAutoDetach(bufferName)
}
}
if highlight || directMessage {
go uc.network.broadcastWebPush(msg)
if timestamp, err := time.Parse(xirc.ServerTimeLayout, string(msg.Tags["time"])); err == nil {
uc.network.pushTargets.Set(bufferName, timestamp)
}
}
uc.produce(bufferName, msg, downstreamID)
case "CAP":
var subCmd string
if err := parseMessageParams(msg, nil, &subCmd); err != nil {
return err
}
subCmd = strings.ToUpper(subCmd)
subParams := msg.Params[2:]
switch subCmd {
case "LS":
if len(subParams) < 1 {
return newNeedMoreParamsError(msg.Command)
}
caps := subParams[len(subParams)-1]
more := len(subParams) >= 2 && msg.Params[len(msg.Params)-2] == "*"
uc.handleSupportedCaps(caps)
if more {
break // wait to receive all capabilities
}
uc.updateCaps(ctx)
if uc.requestSASL() {
break // we'll send CAP END after authentication is completed
}
uc.SendMessage(ctx, &irc.Message{
Command: "CAP",
Params: []string{"END"},
})
case "ACK", "NAK":
if len(subParams) < 1 {
return newNeedMoreParamsError(msg.Command)
}
caps := strings.Fields(subParams[0])
for _, name := range caps {
enable := subCmd == "ACK"
if strings.HasPrefix(name, "-") {
name = strings.TrimPrefix(name, "-")
enable = false
}
if err := uc.handleCapAck(ctx, strings.ToLower(name), enable); err != nil {
return err
}
}
if uc.registered {
uc.forEachDownstream(func(dc *downstreamConn) {
dc.updateSupportedCaps(ctx)
})
}
case "NEW":
if len(subParams) < 1 {
return newNeedMoreParamsError(msg.Command)
}
uc.handleSupportedCaps(subParams[0])
uc.updateCaps(ctx)
case "DEL":
if len(subParams) < 1 {
return newNeedMoreParamsError(msg.Command)
}
caps := strings.Fields(subParams[0])
for _, c := range caps {
uc.caps.Del(c)
}
if uc.registered {
uc.forEachDownstream(func(dc *downstreamConn) {
dc.updateSupportedCaps(ctx)
})
}
default:
uc.logger.Printf("unhandled message: %v", msg)
}
case "AUTHENTICATE":
if uc.saslClient == nil {
return fmt.Errorf("received unexpected AUTHENTICATE message")
}
// TODO: if a challenge is 400 bytes long, buffer it
var challengeStr string
if err := parseMessageParams(msg, &challengeStr); err != nil {
uc.SendMessage(ctx, &irc.Message{
Command: "AUTHENTICATE",
Params: []string{"*"},
})
return err
}
var challenge []byte
if challengeStr != "+" {
var err error
challenge, err = base64.StdEncoding.DecodeString(challengeStr)
if err != nil {
uc.SendMessage(ctx, &irc.Message{
Command: "AUTHENTICATE",
Params: []string{"*"},
})
return err
}
}
var resp []byte
var err error
if !uc.saslStarted {
_, resp, err = uc.saslClient.Start()
uc.saslStarted = true
} else {
resp, err = uc.saslClient.Next(challenge)
}
if err != nil {
uc.SendMessage(ctx, &irc.Message{
Command: "AUTHENTICATE",
Params: []string{"*"},
})
return err
}
for _, msg := range xirc.GenerateSASL(resp) {
uc.SendMessage(ctx, msg)
}
case irc.RPL_LOGGEDIN:
var rawPrefix string
if err := parseMessageParams(msg, nil, &rawPrefix, &uc.account); err != nil {
return err
}
prefix := irc.ParsePrefix(rawPrefix)
uc.username = prefix.User
uc.hostname = prefix.Host
uc.logger.Printf("logged in with account %q", uc.account)
uc.forEachDownstream(func(dc *downstreamConn) {
dc.updateAccount(ctx)
dc.updateHost(ctx)
})
case irc.RPL_LOGGEDOUT:
var rawPrefix string
if err := parseMessageParams(msg, nil, &rawPrefix); err != nil {
return err
}
uc.account = ""
prefix := irc.ParsePrefix(rawPrefix)
uc.username = prefix.User
uc.hostname = prefix.Host
uc.logger.Printf("logged out")
uc.forEachDownstream(func(dc *downstreamConn) {
dc.updateAccount(ctx)
dc.updateHost(ctx)
})
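// RPL_VISIBLEHOST reports the hostname (optionally "user@host") other
// users see us with.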
case xirc.RPL_VISIBLEHOST:
var rawHost string
if err := parseMessageParams(msg, nil, &rawHost); err != nil {
return err
}
parts := strings.SplitN(rawHost, "@", 2)
if len(parts) == 2 {
uc.username, uc.hostname = parts[0], parts[1]
} else {
uc.hostname = rawHost
}
uc.forEachDownstream(func(dc *downstreamConn) {
dc.updateHost(ctx)
})
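// End of the SASL exchange (success or failure): reset the SASL state,
// relay the result to the downstream that requested it, and finish
// capability negotiation if we are still registering.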
case irc.ERR_NICKLOCKED, irc.RPL_SASLSUCCESS, irc.ERR_SASLFAIL, irc.ERR_SASLTOOLONG, irc.ERR_SASLABORTED:
var info string
if err := parseMessageParams(msg, nil, &info); err != nil {
return err
}
switch msg.Command {
case irc.ERR_NICKLOCKED:
uc.logger.Printf("invalid nick used with SASL authentication: %v", info)
case irc.ERR_SASLFAIL:
uc.logger.Printf("SASL authentication failed: %v", info)
case irc.ERR_SASLTOOLONG:
uc.logger.Printf("SASL message too long: %v", info)
}
uc.saslClient = nil
uc.saslStarted = false
if dc, _ := uc.dequeueCommand("AUTHENTICATE"); dc != nil && dc.sasl != nil {
if msg.Command == irc.RPL_SASLSUCCESS {
uc.network.autoSaveSASLPlain(ctx, dc.sasl.plain.Username, dc.sasl.plain.Password)
}
dc.endSASL(ctx, msg)
}
if !uc.registered {
uc.SendMessage(ctx, &irc.Message{
Command: "CAP",
Params: []string{"END"},
})
}
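// REGISTER and VERIFY replies (draft/account-registration) are relayed to
// the downstream that initiated them; the account name and the password
// from the original REGISTER command are auto-saved for later SASL use.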
case "REGISTER", "VERIFY":
if dc, cmd := uc.dequeueCommand(msg.Command); dc != nil {
if msg.Command == "REGISTER" {
var account, password string
if err := parseMessageParams(msg, nil, &account); err != nil {
return err
}
if err := parseMessageParams(cmd, nil, nil, &password); err != nil {
return err
}
uc.network.autoSaveSASLPlain(ctx, account, password)
}
dc.SendMessage(ctx, msg)
}
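// RPL_WELCOME completes registration: remember our nick and the server
// prefix, then join the channels saved for this network.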
case irc.RPL_WELCOME:
if err := parseMessageParams(msg, &uc.nick); err != nil {
return err
}
uc.registered = true
uc.serverPrefix = msg.Prefix
uc.logger.Printf("connection registered with nick %q", uc.nick)
if uc.network.channels.Len() > 0 {
var channels, keys []string
uc.network.channels.ForEach(func(_ string, ch *database.Channel) {
channels = append(channels, ch.Name)
keys = append(keys, ch.Key)
})
for _, msg := range xirc.GenerateJoin(channels, keys) {
uc.SendMessage(ctx, msg)
}
}
case irc.RPL_MYINFO:
if err := parseMessageParams(msg, nil, &uc.serverName, nil, &uc.availableUserModes, nil); err != nil {
return err
}
case irc.RPL_ISUPPORT:
if err := parseMessageParams(msg, nil, nil); err != nil {
return err
}
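// Each RPL_ISUPPORT token is either "NAME", "NAME=value" or "-NAME"
// (negation). Tokens soju understands are applied locally; the ones
// listed in passthroughIsupport are also relayed to downstreams.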
var downstreamIsupport []string
for _, token := range msg.Params[1 : len(msg.Params)-1] {
parameter := token
var negate, hasValue bool
var value string
if strings.HasPrefix(token, "-") {
negate = true
token = token[1:]
} else if i := strings.IndexByte(token, '='); i >= 0 {
parameter = token[:i]
value = token[i+1:]
hasValue = true
}
parameter = strings.ToUpper(parameter)
if hasValue {
uc.isupport[parameter] = &value
} else if !negate {
uc.isupport[parameter] = nil
} else {
delete(uc.isupport, parameter)
}
var err error
switch parameter {
case "CASEMAPPING":
casemap := xirc.ParseCaseMapping(value)
if casemap == nil {
casemap = xirc.CaseMappingRFC1459
}
uc.network.updateCasemapping(casemap)
case "CHANMODES":
if !negate {
err = uc.handleChanModes(value)
} else {
uc.availableChannelModes = stdChannelModes
}
case "CHANTYPES":
if !negate {
uc.availableChannelTypes = value
} else {
uc.availableChannelTypes = stdChannelTypes
}
case "PREFIX":
if !negate {
err = uc.handleMemberships(value)
} else {
uc.availableMemberships = stdMemberships
}
}
if err != nil {
return err
}
if passthroughIsupport[parameter] {
downstreamIsupport = append(downstreamIsupport, token)
}
}
uc.updateMonitor()
uc.forEachDownstream(func(dc *downstreamConn) {
msgs := xirc.GenerateIsupport(dc.srv.prefix(), dc.nick, downstreamIsupport)
for _, msg := range msgs {
dc.SendMessage(ctx, msg)
}
})
case irc.ERR_NOMOTD, irc.RPL_ENDOFMOTD:
if !uc.gotMotd {
// Ignore the initial MOTD upon connection, but forward
// subsequent MOTD messages downstream
uc.gotMotd = true
// If upstream did not send any CASEMAPPING token, assume it
// implements the old RFCs with rfc1459.
if uc.isupport["CASEMAPPING"] == nil {
uc.network.updateCasemapping(stdCaseMapping)
}
// If the server doesn't support MONITOR, periodically try to
// regain our desired nick
if _, ok := uc.isupport["MONITOR"]; !ok {
uc.startRegainNickTimer()
}
return nil
}
uc.forwardMsgByID(ctx, downstreamID, msg)
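// "BATCH +tag type params..." opens an upstream batch (possibly nested in
// the batch this message itself belongs to); "BATCH -tag" closes it.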
case "BATCH":
var tag string
if err := parseMessageParams(msg, &tag); err != nil {
return err
}
if strings.HasPrefix(tag, "+") {
tag = tag[1:]
if _, ok := uc.batches[tag]; ok {
return fmt.Errorf("unexpected BATCH reference tag: batch was already defined: %q", tag)
}
var batchType string
if err := parseMessageParams(msg, nil, &batchType); err != nil {
return err
}
label := label
if label == "" && msgBatch != nil {
label = msgBatch.Label
}
uc.batches[tag] = upstreamBatch{
Type: batchType,
Params: msg.Params[2:],
Outer: msgBatch,
Label: label,
}
} else if strings.HasPrefix(tag, "-") {
tag = tag[1:]
if _, ok := uc.batches[tag]; !ok {
return fmt.Errorf("unknown BATCH reference tag: %q", tag)
}
delete(uc.batches, tag)
} else {
return fmt.Errorf("unexpected BATCH reference tag: missing +/- prefix: %q", tag)
}
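// Nick change: update our own state if it concerns us, rename the user in
// every channel member list, and refresh the cached WHO info.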
case "NICK":
var newNick string
if err := parseMessageParams(msg, &newNick); err != nil {
return err
}
me := false
if uc.isOurNick(msg.Prefix.Name) {
uc.logger.Printf("changed nick from %q to %q", uc.nick, newNick)
me = true
uc.nick = newNick
if uc.network.equalCasemap(uc.pendingRegainNick, newNick) {
uc.pendingRegainNick = ""
uc.stopRegainNickTimer()
}
wantNick := database.GetNick(&uc.user.User, &uc.network.Network)
if uc.network.equalCasemap(wantNick, newNick) {
uc.hasDesiredNick = true
}
}
uc.channels.ForEach(func(_ string, ch *upstreamChannel) {
memberships := ch.Members.Get(msg.Prefix.Name)
if memberships != nil {
ch.Members.Del(msg.Prefix.Name)
ch.Members.Set(newNick, memberships)
uc.appendLog(ch.Name, msg)
}
})
uc.cacheUserInfo(msg.Prefix.Name, &upstreamUser{
Nickname: newNick,
})
if !me {
uc.forwardMessage(ctx, msg)
} else {
uc.forEachDownstream(func(dc *downstreamConn) {
dc.updateNick(ctx)
})
uc.updateMonitor()
}
case "SETNAME":
var newRealname string
if err := parseMessageParams(msg, &newRealname); err != nil {
return err
}
uc.cacheUserInfo(msg.Prefix.Name, &upstreamUser{
Realname: newRealname,
})
// TODO: consider appending this message to logs
if uc.isOurNick(msg.Prefix.Name) {
uc.logger.Printf("changed realname from %q to %q", uc.realname, newRealname)
uc.realname = newRealname
uc.forEachDownstream(func(dc *downstreamConn) {
dc.updateRealname(ctx)
})
} else {
uc.forwardMessage(ctx, msg)
}
case "CHGHOST":
var newUsername, newHostname string
if err := parseMessageParams(msg, &newUsername, &newHostname); err != nil {
return err
}
newPrefix := &irc.Prefix{
Name: uc.nick,
User: newUsername,
Host: newHostname,
}
if uc.isOurNick(msg.Prefix.Name) {
uc.logger.Printf("changed prefix from %q to %q", msg.Prefix.Host, newPrefix)
uc.username = newUsername
uc.hostname = newHostname
uc.forEachDownstream(func(dc *downstreamConn) {
dc.updateHost(ctx)
})
} else {
// TODO: add fallback with QUIT/JOIN/MODE messages
uc.forwardMessage(ctx, msg)
}
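// JOIN: cache what we know about the joining user (away-notify and
// extended-join provide flags, account and realname). If we are the one
// joining, create the channel state and query its modes; otherwise just
// add the user to the member list.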
case "JOIN":
var channels string
if err := parseMessageParams(msg, &channels); err != nil {
return err
}
uu := &upstreamUser{
Username: msg.Prefix.User,
Hostname: msg.Prefix.Host,
}
if uc.caps.IsEnabled("away-notify") {
// we have enough info to build the user flags in a best-effort manner:
// - the H/G flag is set to Here first, will be replaced by Gone later if the user is AWAY
uu.Flags = "H"
// - the B (bot mode) flag is set if the JOIN comes from a bot
// note: we have no way to track the user bot mode after they have joined
// (we are not notified of the bot mode updates), but this is good enough.
if _, ok := msg.Tags["bot"]; ok {
if bot := uc.isupport["BOT"]; bot != nil {
uu.Flags += *bot
}
}
// TODO: add the server operator flag (`*`) if the message has an oper-tag
}
if len(msg.Params) > 2 { // extended-join
uu.Account = msg.Params[1]
uu.Realname = msg.Params[2]
}
uc.cacheUserInfo(msg.Prefix.Name, uu)
for _, ch := range strings.Split(channels, ",") {
if uc.isOurNick(msg.Prefix.Name) {
uc.logger.Printf("joined channel %q", ch)
members := xirc.NewCaseMappingMap[*xirc.MembershipSet](uc.network.casemap)
uc.channels.Set(ch, &upstreamChannel{
Name: ch,
conn: uc,
Members: members,
})
uc.updateChannelAutoDetach(ch)
uc.SendMessage(ctx, &irc.Message{
Command: "MODE",
Params: []string{ch},
})
} else {
ch, err := uc.getChannel(ch)
if err != nil {
return err
}
ch.Members.Set(msg.Prefix.Name, &xirc.MembershipSet{})
}
chMsg := msg.Copy()
chMsg.Params[0] = ch
uc.produce(ch, chMsg, 0)
}
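// PART: drop the channel state when we leave, otherwise remove the user
// from the member list; users we no longer need to track are evicted
// from the WHO cache.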
case "PART":
var channels string
if err := parseMessageParams(msg, &channels); err != nil {
return err
}
for _, ch := range strings.Split(channels, ",") {
if uc.isOurNick(msg.Prefix.Name) {
uc.logger.Printf("parted channel %q", ch)
if uch := uc.channels.Get(ch); uch != nil {
uc.channels.Del(ch)
uch.updateAutoDetach(0)
uch.Members.ForEach(func(nick string, memberships *xirc.MembershipSet) {
if !uc.shouldCacheUserInfo(nick) {
uc.users.Del(nick)
}
})
}
} else {
ch, err := uc.getChannel(ch)
if err != nil {
return err
}
ch.Members.Del(msg.Prefix.Name)
if !uc.shouldCacheUserInfo(msg.Prefix.Name) {
uc.users.Del(msg.Prefix.Name)
}
}
chMsg := msg.Copy()
chMsg.Params[0] = ch
uc.produce(ch, chMsg, 0)
}
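// KICK is handled like PART, except the affected nick is a message
// parameter rather than the prefix.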
case "KICK":
var channel, user string
if err := parseMessageParams(msg, &channel, &user); err != nil {
return err
}
if uc.isOurNick(user) {
uc.logger.Printf("kicked from channel %q by %s", channel, msg.Prefix.Name)
if uch := uc.channels.Get(channel); uch != nil {
uc.channels.Del(channel)
uch.Members.ForEach(func(nick string, memberships *xirc.MembershipSet) {
if !uc.shouldCacheUserInfo(nick) {
uc.users.Del(nick)
}
})
}
} else {
ch, err := uc.getChannel(channel)
if err != nil {
return err
}
ch.Members.Del(user)
if !uc.shouldCacheUserInfo(user) {
uc.users.Del(user)
}
}
uc.produce(channel, msg, 0)
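// QUIT: remove the user from every channel they were in, log the quit in
// those channels, and drop them from the user cache.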
case "QUIT":
if uc.isOurNick(msg.Prefix.Name) {
uc.logger.Printf("quit")
}
uc.channels.ForEach(func(_ string, ch *upstreamChannel) {
if ch.Members.Has(msg.Prefix.Name) {
ch.Members.Del(msg.Prefix.Name)
uc.appendLog(ch.Name, msg)
}
})
uc.users.Del(msg.Prefix.Name)
if msg.Prefix.Name != uc.nick {
uc.forwardMessage(ctx, msg)
}
case irc.RPL_TOPIC, irc.RPL_NOTOPIC:
var name, topic string
if err := parseMessageParams(msg, nil, &name, &topic); err != nil {
return err
}
ch, err := uc.getChannel(name)
if err != nil {
return err
}
if msg.Command == irc.RPL_TOPIC {
ch.Topic = topic
} else {
ch.Topic = ""
}
case "TOPIC":
var name string
if err := parseMessageParams(msg, &name); err != nil {
return err
}
ch, err := uc.getChannel(name)
if err != nil {
return err
}
if len(msg.Params) > 1 {
ch.Topic = msg.Params[1]
ch.TopicWho = msg.Prefix.Copy()
ch.TopicTime = time.Now() // TODO use msg.Tags["time"]
} else {
ch.Topic = ""
}
uc.produce(ch.Name, msg, 0)
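// MODE targeting our own nick updates the user modes; MODE targeting a
// channel updates the tracked channel modes and is only forwarded when
// the channel is not detached.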
case "MODE":
var name, modeStr string
if err := parseMessageParams(msg, &name, &modeStr); err != nil {
return err
}
if !uc.isChannel(name) { // user mode change
if name != uc.nick {
return fmt.Errorf("received MODE message for unknown nick %q", name)
}
if err := uc.modes.Apply(modeStr); err != nil {
return err
}
uc.forwardMessage(ctx, msg)
} else { // channel mode change
ch, err := uc.getChannel(name)
if err != nil {
return err
}
err = applyChannelModes(ch, modeStr, msg.Params[2:])
if err != nil {
return err
}
uc.appendLog(ch.Name, msg)
c := uc.network.channels.Get(name)
if c == nil || !c.Detached {
uc.forwardMessage(ctx, msg)
}
}
case irc.RPL_UMODEIS:
if err := parseMessageParams(msg, nil); err != nil {
return err
}
modeStr := ""
if len(msg.Params) > 1 {
modeStr = msg.Params[1]
}
uc.modes = ""
if err := uc.modes.Apply(modeStr); err != nil {
return err
}
uc.forwardMessage(ctx, msg)
case irc.RPL_CHANNELMODEIS:
var channel string
if err := parseMessageParams(msg, nil, &channel); err != nil {
return err
}
modeStr := ""
if len(msg.Params) > 2 {
modeStr = msg.Params[2]
}
ch, err := uc.getChannel(channel)
if err != nil {
return err
}
firstMode := ch.modes == nil
ch.modes = make(map[byte]string)
if err := applyChannelModes(ch, modeStr, msg.Params[3:]); err != nil {
return err
}
c := uc.network.channels.Get(channel)
if firstMode && (c == nil || !c.Detached) {
uc.forwardMessage(ctx, msg)
}
case xirc.RPL_CREATIONTIME:
var channel, creationTime string
if err := parseMessageParams(msg, nil, &channel, &creationTime); err != nil {
return err
}
ch, err := uc.getChannel(channel)
if err != nil {
return err
}
firstCreationTime := ch.creationTime == ""
ch.creationTime = creationTime
c := uc.network.channels.Get(channel)
if firstCreationTime && (c == nil || !c.Detached) {
uc.forwardMessage(ctx, msg)
}
case xirc.RPL_TOPICWHOTIME:
var channel, who, timeStr string
if err := parseMessageParams(msg, nil, &channel, &who, &timeStr); err != nil {
return err
}
ch, err := uc.getChannel(channel)
if err != nil {
return err
}
firstTopicWhoTime := ch.TopicWho == nil
ch.TopicWho = irc.ParsePrefix(who)
sec, err := strconv.ParseInt(timeStr, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse topic time: %v", err)
}
ch.TopicTime = time.Unix(sec, 0)
c := uc.network.channels.Get(channel)
if firstTopicWhoTime && (c == nil || !c.Detached) {
uc.forwardMessage(ctx, msg)
}
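// RPL_LIST replies are only forwarded to the downstream connection that
// issued the matching pending LIST command.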
case irc.RPL_LIST:
dc, cmd := uc.currentPendingCommand("LIST")
if cmd == nil {
return fmt.Errorf("unexpected RPL_LIST: no matching pending LIST")
} else if dc == nil {
return nil
}
dc.SendMessage(ctx, msg)
case irc.RPL_LISTEND:
dc, cmd := uc.dequeueCommand("LIST")
if cmd == nil {
return fmt.Errorf("unexpected RPL_LISTEND: no matching pending LIST")
} else if dc == nil {
return nil
}
dc.SendMessage(ctx, msg)
case irc.RPL_NAMREPLY:
var name, statusStr, members string
if err := parseMessageParams(msg, nil, &statusStr, &name, &members); err != nil {
return err
}
ch := uc.channels.Get(name)
if ch == nil {
// NAMES on a channel we have not joined, forward to downstream
uc.forwardMsgByID(ctx, downstreamID, msg)
return nil
}
status, err := xirc.ParseChannelStatus(statusStr)
if err != nil {
return err
}
ch.Status = status
for _, s := range splitSpace(members) {
memberships, nick := uc.parseMembershipPrefix(s)
ch.Members.Set(nick, &memberships)
}
case irc.RPL_ENDOFNAMES:
var name string
if err := parseMessageParams(msg, nil, &name); err != nil {
return err
}
ch := uc.channels.Get(name)
if ch == nil {
// NAMES on a channel we have not joined, forward to downstream
uc.forwardMsgByID(ctx, downstreamID, msg)
return nil
}
if ch.complete {
return fmt.Errorf("received unexpected RPL_ENDOFNAMES")
}
ch.complete = true
c := uc.network.channels.Get(name)
if c == nil || !c.Detached {
uc.forEachDownstream(func(dc *downstreamConn) {
forwardChannel(ctx, dc, ch)
})
}
case irc.RPL_WHOREPLY:
var username, host, server, nick, flags, trailing string
if err := parseMessageParams(msg, nil, nil, &username, &host, &server, &nick, &flags, &trailing); err != nil {
return err
}
dc, cmd := uc.currentPendingCommand("WHO")
if cmd == nil {
return fmt.Errorf("unexpected RPL_WHOREPLY: no matching pending WHO")
} else if dc == nil {
return nil
}
parts := strings.SplitN(trailing, " ", 2)
if len(parts) != 2 {
return fmt.Errorf("malformed RPL_WHOREPLY: failed to parse real name")
}
realname := parts[1]
dc.SendMessage(ctx, msg)
if uc.shouldCacheUserInfo(nick) {
uc.cacheUserInfo(nick, &upstreamUser{
Username: username,
Hostname: host,
Server: server,
Nickname: nick,
Flags: flags,
Realname: realname,
})
}
case xirc.RPL_WHOSPCRPL:
dc, cmd := uc.currentPendingCommand("WHO")
if cmd == nil {
return fmt.Errorf("unexpected RPL_WHOSPCRPL: no matching pending WHO")
} else if dc == nil {
return nil
}
dc.SendMessage(ctx, msg)
if len(cmd.Params) > 1 {
fields, _ := xirc.ParseWHOXOptions(cmd.Params[1])
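// The WHOX reply can only be cached if the nick field ('n') was
// requested, since the user cache is keyed by nickname.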
if strings.IndexByte(fields, 'n') < 0 {
return nil
}
info, err := xirc.ParseWHOXReply(msg, fields)
if err != nil {
return err
}
if uc.shouldCacheUserInfo(info.Nickname) {
uc.cacheUserInfo(info.Nickname, &upstreamUser{
Nickname: info.Nickname,
Username: info.Username,
Hostname: info.Hostname,
Server: info.Server,
Flags: info.Flags,
Account: info.Account,
Realname: info.Realname,
})
}
}
case irc.RPL_ENDOFWHO:
dc, cmd := uc.dequeueCommand("WHO")
if cmd == nil {
// Some servers send RPL_TRYAGAIN followed by RPL_ENDOFWHO
return nil
} else if dc == nil {
// Downstream connection is gone
return nil
}
dc.SendMessage(ctx, msg)
case xirc.RPL_WHOISCERTFP, xirc.RPL_WHOISREGNICK, irc.RPL_WHOISUSER, irc.RPL_WHOISSERVER, irc.RPL_WHOISCHANNELS, irc.RPL_WHOISOPERATOR, irc.RPL_WHOISIDLE, xirc.RPL_WHOISSPECIAL, xirc.RPL_WHOISACCOUNT, xirc.RPL_WHOISACTUALLY, xirc.RPL_WHOISHOST, xirc.RPL_WHOISMODES, xirc.RPL_WHOISSECURE:
dc, cmd := uc.currentPendingCommand("WHOIS")
if cmd == nil {
return fmt.Errorf("unexpected WHOIS reply %q: no matching pending WHOIS", msg.Command)
} else if dc == nil {
return nil
}
dc.SendMessage(ctx, msg)
case irc.RPL_ENDOFWHOIS:
dc, cmd := uc.dequeueCommand("WHOIS")
if cmd == nil {
return fmt.Errorf("unexpected RPL_ENDOFWHOIS: no matching pending WHOIS")
} else if dc == nil {
return nil
}
dc.SendMessage(ctx, msg)
case "INVITE":
var nick, channel string
if err := parseMessageParams(msg, &nick, &channel); err != nil {
return err
}
weAreInvited := uc.isOurNick(nick)
uc.forEachDownstream(func(dc *downstreamConn) {
if !weAreInvited && !dc.caps.IsEnabled("invite-notify") {
return
}
dc.SendMessage(ctx, msg)
})
if weAreInvited {
go uc.network.broadcastWebPush(msg)
}
case irc.RPL_INVITING:
var nick, channel string
if err := parseMessageParams(msg, nil, &nick, &channel); err != nil {
return err
}
uc.forwardMsgByID(ctx, downstreamID, msg)
case irc.RPL_MONONLINE, irc.RPL_MONOFFLINE:
var targetsStr string
if err := parseMessageParams(msg, nil, &targetsStr); err != nil {
return err
}
targets := strings.Split(targetsStr, ",")
online := msg.Command == irc.RPL_MONONLINE
for _, target := range targets {
prefix := irc.ParsePrefix(target)
uc.monitored.Set(prefix.Name, online)
}
// Check if the nick we want is now free
wantNick := database.GetNick(&uc.user.User, &uc.network.Network)
if !online && !uc.isOurNick(wantNick) && !uc.hasDesiredNick {
found := false
for _, target := range targets {
prefix := irc.ParsePrefix(target)
if uc.network.equalCasemap(prefix.Name, wantNick) {
found = true
break
}
}
if found {
uc.logger.Printf("desired nick %q is now available", wantNick)
uc.SendMessage(ctx, &irc.Message{
Command: "NICK",
Params: []string{wantNick},
})
}
}
uc.forEachDownstream(func(dc *downstreamConn) {
for _, target := range targets {
prefix := irc.ParsePrefix(target)
if dc.monitored.Has(prefix.Name) {
dc.SendMessage(ctx, &irc.Message{
Prefix: dc.srv.prefix(),
Command: msg.Command,
Params: []string{dc.nick, target},
})
}
}
})
case irc.ERR_MONLISTFULL:
var limit, targetsStr string
if err := parseMessageParams(msg, nil, &limit, &targetsStr); err != nil {
return err
}
targets := strings.Split(targetsStr, ",")
uc.forEachDownstream(func(dc *downstreamConn) {
for _, target := range targets {
if dc.monitored.Has(target) {
dc.SendMessage(ctx, &irc.Message{
Prefix: dc.srv.prefix(),
Command: msg.Command,
Params: []string{dc.nick, limit, target},
})
}
}
})
case irc.RPL_AWAY:
uc.forwardMsgByID(ctx, downstreamID, msg)
case "AWAY":
// Update user flags, if we already have the flags cached
uu := uc.users.Get(msg.Prefix.Name)
if uu != nil && uu.Flags != "" {
flags := uu.Flags
if isAway := len(msg.Params) > 0; isAway {
flags = strings.ReplaceAll(flags, "H", "G")
} else {
flags = strings.ReplaceAll(flags, "G", "H")
}
uc.cacheUserInfo(msg.Prefix.Name, &upstreamUser{
Flags: flags,
})
}
uc.forwardMessage(ctx, msg)
case "ACCOUNT":
var account string
if err := parseMessageParams(msg, &account); err != nil {
return err
}
uc.cacheUserInfo(msg.Prefix.Name, &upstreamUser{
Account: account,
})
uc.forwardMessage(ctx, msg)
case irc.RPL_BANLIST, irc.RPL_INVITELIST, irc.RPL_EXCEPTLIST, irc.RPL_ENDOFBANLIST, irc.RPL_ENDOFINVITELIST, irc.RPL_ENDOFEXCEPTLIST:
uc.forwardMsgByID(ctx, downstreamID, msg)
case irc.ERR_NOSUCHNICK:
var nick, reason string
if err := parseMessageParams(msg, nil, &nick, &reason); err != nil {
return err
}
cm := uc.network.casemap
dc, cmd := uc.currentPendingCommand("WHOIS")
if cmd != nil && cm(cmd.Params[len(cmd.Params)-1]) == cm(nick) {
uc.dequeueCommand("WHOIS")
if dc != nil {
dc.SendMessage(ctx, msg)
}
} else {
uc.forwardMsgByID(ctx, downstreamID, msg)
}
case xirc.ERR_UNKNOWNERROR, irc.ERR_UNKNOWNCOMMAND, irc.ERR_NEEDMOREPARAMS, irc.RPL_TRYAGAIN:
var command, reason string
if err := parseMessageParams(msg, nil, &command, &reason); err != nil {
return err
}
if dc, _ := uc.dequeueCommand(command); dc != nil && downstreamID == 0 {
downstreamID = dc.id
}
if command == "AUTHENTICATE" {
uc.saslClient = nil
uc.saslStarted = false
}
uc.forwardMsgByID(ctx, downstreamID, msg)
case "FAIL":
var command, code string
if err := parseMessageParams(msg, &command, &code); err != nil {
return err
}
if !uc.registered && command == "*" && code == "ACCOUNT_REQUIRED" {
return registrationError{msg}
}
if dc, _ := uc.dequeueCommand(command); dc != nil && downstreamID == 0 {
downstreamID = dc.id
}
uc.forwardMsgByID(ctx, downstreamID, msg)
case "ACK":
// Ignore
case irc.RPL_NOWAWAY, irc.RPL_UNAWAY:
// Ignore
case irc.RPL_YOURHOST, irc.RPL_CREATED:
// Ignore
case irc.RPL_LUSERCLIENT, irc.RPL_LUSEROP, irc.RPL_LUSERUNKNOWN, irc.RPL_LUSERCHANNELS, irc.RPL_LUSERME:
fallthrough
case irc.RPL_STATSVLINE, xirc.RPL_STATSPING, irc.RPL_STATSBLINE, irc.RPL_STATSDLINE:
fallthrough
case xirc.RPL_LOCALUSERS, xirc.RPL_GLOBALUSERS:
fallthrough
case irc.RPL_MOTDSTART, irc.RPL_MOTD:
// Ignore these messages if they're part of the initial registration
// message burst. Forward them if the user explicitly asked for them.
if !uc.gotMotd {
return nil
}
uc.forwardMsgByID(ctx, downstreamID, msg)
case irc.RPL_LISTSTART:
// Ignore
case "ERROR":
var text string
if err := parseMessageParams(msg, &text); err != nil {
return err
}
return fmt.Errorf("fatal server error: %v", text)
case irc.ERR_NICKNAMEINUSE:
// At this point, we haven't received ISUPPORT so we don't know the
// maximum nickname length or whether the server supports MONITOR. Many
// servers have NICKLEN=30 so let's just use that.
if !uc.registered && len(uc.nick)+1 < 30 {
uc.nick = uc.nick + "_"
uc.hasDesiredNick = false
uc.logger.Printf("desired nick is not available, falling back to %q", uc.nick)
uc.SendMessage(ctx, &irc.Message{
Command: "NICK",
Params: []string{uc.nick},
})
return nil
}
var failedNick string
if err := parseMessageParams(msg, nil, &failedNick); err != nil {
return err
}
if uc.network.equalCasemap(uc.pendingRegainNick, failedNick) {
// This message comes from our own logic to try to regain our
// desired nick, don't relay to downstream connections
uc.pendingRegainNick = ""
return nil
}
fallthrough
case irc.ERR_PASSWDMISMATCH, irc.ERR_ERRONEUSNICKNAME, irc.ERR_NICKCOLLISION, irc.ERR_UNAVAILRESOURCE, irc.ERR_NOPERMFORHOST, irc.ERR_YOUREBANNEDCREEP:
if !uc.registered {
return registrationError{msg}
}
uc.forwardMsgByID(ctx, downstreamID, msg)
default:
uc.logger.Printf("unhandled message: %v", msg)
uc.forwardMsgByID(ctx, downstreamID, msg)
}
return nil
}
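// handleDetachedMessage handles a message received for a detached channel:
// it relays the message to downstream connections when the channel's relay
// settings require it, and re-attaches the channel when its reattach policy
// matches the message.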
func (uc *upstreamConn) handleDetachedMessage(ctx context.Context, ch *database.Channel, msg *irc.Message) {
if uc.network.detachedMessageNeedsRelay(ch, msg) {
uc.forEachDownstream(func(dc *downstreamConn) {
dc.relayDetachedMessage(uc.network, msg)
})
}
if ch.ReattachOn == database.FilterMessage || (ch.ReattachOn == database.FilterHighlight && uc.network.isHighlight(msg)) {
uc.network.attach(ctx, ch)
if err := uc.srv.db.StoreChannel(ctx, uc.network.ID, ch); err != nil {
uc.logger.Printf("failed to update channel %q: %v", ch.Name, err)
}
}
}
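// handleChanModes parses the ISUPPORT CHANMODES value (four comma-separated
// lists of type A, B, C and D modes) and stores the result in
// uc.availableChannelModes.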
func (uc *upstreamConn) handleChanModes(s string) error {
parts := strings.SplitN(s, ",", 5)
if len(parts) < 4 {
return fmt.Errorf("malformed ISUPPORT CHANMODES value: %v", s)
}
modes := make(map[byte]channelModeType)
for i, mt := range []channelModeType{modeTypeA, modeTypeB, modeTypeC, modeTypeD} {
for j := 0; j < len(parts[i]); j++ {
mode := parts[i][j]
modes[mode] = mt
}
}
uc.availableChannelModes = modes
return nil
}
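// handleMemberships parses the ISUPPORT PREFIX value (e.g. "(ov)@+") and
// stores the advertised membership modes and prefixes in
// uc.availableMemberships. An empty value clears the list.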
func (uc *upstreamConn) handleMemberships(s string) error {
if s == "" {
uc.availableMemberships = nil
return nil
}
if s[0] != '(' {
return fmt.Errorf("malformed ISUPPORT PREFIX value: %v", s)
}
sep := strings.IndexByte(s, ')')
if sep < 0 || len(s) != sep*2 {
return fmt.Errorf("malformed ISUPPORT PREFIX value: %v", s)
}
memberships := make([]xirc.Membership, len(s)/2-1)
for i := range memberships {
memberships[i] = xirc.Membership{
Mode: s[i+1],
Prefix: s[sep+i+1],
}
}
uc.availableMemberships = memberships
return nil
}
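// handleSupportedCaps records the capabilities advertised by the upstream
// server, together with their values, in uc.caps.Available.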
func (uc *upstreamConn) handleSupportedCaps(capsStr string) {
caps := strings.Fields(capsStr)
for _, s := range caps {
kv := strings.SplitN(s, "=", 2)
k := strings.ToLower(kv[0])
var v string
if len(kv) == 2 {
v = kv[1]
}
uc.caps.Available[k] = v
}
}
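// updateCaps requests any supported permanent capability that is available
// but not yet enabled. echo-message is only requested when labeled-response
// is available, and is disabled again when labeled-response disappears.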
func (uc *upstreamConn) updateCaps(ctx context.Context) {
var requestCaps []string
for c := range permanentUpstreamCaps {
if uc.caps.IsAvailable(c) && !uc.caps.IsEnabled(c) {
requestCaps = append(requestCaps, c)
}
}
echoMessage := uc.caps.IsAvailable("labeled-response")
if !uc.caps.IsEnabled("echo-message") && echoMessage {
requestCaps = append(requestCaps, "echo-message")
} else if uc.caps.IsEnabled("echo-message") && !echoMessage {
requestCaps = append(requestCaps, "-echo-message")
}
if len(requestCaps) == 0 {
return
}
uc.SendMessage(ctx, &irc.Message{
Command: "CAP",
Params: []string{"REQ", strings.Join(requestCaps, " ")},
})
}
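// supportsSASL reports whether the upstream server advertises the given SASL
// mechanism. A "sasl" capability without a value means any mechanism is
// accepted.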
func (uc *upstreamConn) supportsSASL(mech string) bool {
v, ok := uc.caps.Available["sasl"]
if !ok {
return false
}
if v == "" {
return true
}
mechanisms := strings.Split(v, ",")
for _, m := range mechanisms {
if strings.EqualFold(m, mech) {
return true
}
}
return false
}
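// requestSASL reports whether SASL authentication should be attempted: a
// mechanism must be configured for the network and supported by the upstream.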
func (uc *upstreamConn) requestSASL() bool {
if uc.network.SASL.Mechanism == "" {
return false
}
return uc.supportsSASL(uc.network.SASL.Mechanism)
}
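// handleCapAck records the result of a CAP ACK/NAK from the upstream server.
// When the "sasl" capability is acknowledged, it also starts the configured
// SASL authentication exchange.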
func (uc *upstreamConn) handleCapAck(ctx context.Context, name string, ok bool) error {
uc.caps.SetEnabled(name, ok)
switch name {
case "sasl":
if !uc.requestSASL() {
return nil
}
if !ok {
uc.logger.Printf("server refused to acknowledge the SASL capability")
return nil
}
auth := &uc.network.SASL
switch auth.Mechanism {
case "PLAIN":
uc.logger.Printf("starting SASL PLAIN authentication with username %q", auth.Plain.Username)
uc.saslClient = sasl.NewPlainClient("", auth.Plain.Username, auth.Plain.Password)
case "EXTERNAL":
uc.logger.Printf("starting SASL EXTERNAL authentication")
uc.saslClient = sasl.NewExternalClient("")
default:
return fmt.Errorf("unsupported SASL mechanism %q", name)
}
uc.SendMessage(ctx, &irc.Message{
Command: "AUTHENTICATE",
Params: []string{auth.Mechanism},
})
case "echo-message":
default:
if permanentUpstreamCaps[name] {
break
}
uc.logger.Printf("received CAP ACK/NAK for a cap we don't support: %v", name)
}
return nil
}
func splitSpace(s string) []string {
return strings.FieldsFunc(s, func(r rune) bool {
return r == ' '
})
}
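// register initiates upstream registration: it requests the capability list
// and sends PASS (when configured), NICK and USER.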
func (uc *upstreamConn) register(ctx context.Context) {
uc.nick = database.GetNick(&uc.user.User, &uc.network.Network)
uc.username = database.GetUsername(&uc.user.User, &uc.network.Network)
uc.realname = database.GetRealname(&uc.user.User, &uc.network.Network)
uc.SendMessage(ctx, &irc.Message{
Command: "CAP",
Params: []string{"LS", "302"},
})
if uc.network.Pass != "" {
uc.SendMessage(ctx, &irc.Message{
Command: "PASS",
Params: []string{uc.network.Pass},
})
}
uc.SendMessage(ctx, &irc.Message{
Command: "NICK",
Params: []string{uc.nick},
})
uc.SendMessage(ctx, &irc.Message{
Command: "USER",
Params: []string{uc.username, "0", "*", uc.realname},
})
}
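// ReadMessage reads a single message from the upstream connection and
// updates the incoming message metric.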
func (uc *upstreamConn) ReadMessage() (*irc.Message, error) {
msg, err := uc.conn.ReadMessage()
if err != nil {
return nil, err
}
uc.srv.metrics.upstreamInMessagesTotal.Inc()
return msg, nil
}
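// runUntilRegistered reads and handles upstream messages until registration
// completes, then sends the network's connect commands.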
func (uc *upstreamConn) runUntilRegistered(ctx context.Context) error {
for !uc.registered {
msg, err := uc.ReadMessage()
if err != nil {
return fmt.Errorf("failed to read message: %v", err)
}
if err := uc.handleMessage(ctx, msg); err != nil {
if _, ok := err.(registrationError); ok {
return err
} else {
msg.Tags = nil // prevent message tags from cluttering logs
return fmt.Errorf("failed to handle message %q: %v", msg, err)
}
}
}
for _, command := range uc.network.ConnectCommands {
m, err := irc.ParseMessage(command)
if err != nil {
uc.logger.Printf("failed to parse connect command %q: %v", command, err)
} else {
uc.SendMessage(ctx, m)
}
}
return nil
}
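// readMessages reads upstream messages and forwards them to the user event
// loop until the connection is closed.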
func (uc *upstreamConn) readMessages(ch chan<- event) error {
for {
msg, err := uc.ReadMessage()
if errors.Is(err, io.EOF) {
break
} else if err != nil {
return fmt.Errorf("failed to read IRC command: %v", err)
}
ch <- eventUpstreamMessage{msg, uc}
}
return nil
}
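// SendMessage sends a message to the upstream server, stripping message tags
// when the upstream hasn't enabled the message-tags capability.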
func (uc *upstreamConn) SendMessage(ctx context.Context, msg *irc.Message) {
if !uc.caps.IsEnabled("message-tags") {
msg = msg.Copy()
msg.Tags = nil
}
uc.srv.metrics.upstreamOutMessagesTotal.Inc()
uc.conn.SendMessage(ctx, msg)
}
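// SendMessageLabeled sends a message to the upstream server with a label tag
// identifying the originating downstream connection, so that labeled
// responses can be routed back to it.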
func (uc *upstreamConn) SendMessageLabeled(ctx context.Context, downstreamID uint64, msg *irc.Message) {
if uc.caps.IsEnabled("labeled-response") {
if msg.Tags == nil {
msg.Tags = make(irc.Tags)
}
msg.Tags["label"] = fmt.Sprintf("sd-%d-%d", downstreamID, uc.nextLabelID)
uc.nextLabelID++
}
uc.SendMessage(ctx, msg)
}
// appendLog appends a message to the log file.
//
// The internal message ID is returned. If the message isn't recorded in the
// log file, an empty string is returned.
func (uc *upstreamConn) appendLog(entity string, msg *irc.Message) (msgID string) {
if uc.user.msgStore == nil {
return ""
}
if msg.Command == "TAGMSG" {
store := false
for tag := range storableMessageTags {
if _, ok := msg.Tags[tag]; ok {
store = true
break
}
}
if !store {
return ""
}
}
// Don't store messages with a server mask target
if strings.HasPrefix(entity, "$") {
return ""
}
entityCM := uc.network.casemap(entity)
if entityCM == "nickserv" {
// The messages sent/received from NickServ may contain
// security-related information (like passwords). Don't store these.
return ""
}
if !uc.network.delivered.HasTarget(entity) {
// This is the first message we receive from this target. Save the last
// message ID in delivery receipts, so that we can send the new message
// in the backlog if an offline client reconnects.
lastID, err := uc.user.msgStore.LastMsgID(&uc.network.Network, entityCM, time.Now())
if err != nil {
uc.logger.Printf("failed to log message: failed to get last message ID: %v", err)
return ""
}
uc.network.delivered.ForEachClient(func(clientName string) {
uc.network.delivered.StoreID(entity, clientName, lastID)
})
}
msgID, err := uc.user.msgStore.Append(&uc.network.Network, entityCM, msg)
if err != nil {
uc.logger.Printf("failed to append message to store: %v", err)
return ""
}
return msgID
}
// produce appends a message to the logs and forwards it to connected downstream
// connections.
//
// originID is the id of the downstream (origin) that sent the message. If it is not 0
// and origin doesn't support echo-message, the message is forwarded to all
// connections except origin.
func (uc *upstreamConn) produce(target string, msg *irc.Message, originID uint64) {
var msgID string
if target != "" {
msgID = uc.appendLog(target, msg)
}
// Don't forward messages if it's a detached channel
ch := uc.network.channels.Get(target)
detached := ch != nil && ch.Detached
ctx := context.TODO()
uc.forEachDownstream(func(dc *downstreamConn) {
echo := dc.id == originID && msg.Prefix != nil && uc.isOurNick(msg.Prefix.Name)
if !detached && (!echo || dc.caps.IsEnabled("echo-message")) {
dc.sendMessageWithID(ctx, msg, msgID)
} else {
dc.advanceMessageWithID(ctx, msg, msgID)
}
})
}
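
// updateAway synchronizes our away status with the upstream when auto-away
// is enabled: we go away as soon as every connected downstream is away (or
// none is connected), and come back once an active downstream is present.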
func (uc *upstreamConn) updateAway() {
ctx := context.TODO()
if !uc.network.AutoAway {
return
}
away := true
uc.forEachDownstream(func(dc *downstreamConn) {
if dc.away == nil {
away = false
}
})
if away == uc.away {
return
}
if away {
uc.SendMessage(ctx, &irc.Message{
Command: "AWAY",
Params: []string{"Auto away"},
})
} else {
uc.SendMessage(ctx, &irc.Message{
Command: "AWAY",
})
}
uc.away = away
}
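
// updateChannelAutoDetach re-arms the auto-detach timer of the named channel
// using its configured detach delay. Unknown or already detached channels are
// left untouched.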
func (uc *upstreamConn) updateChannelAutoDetach(name string) {
uch := uc.channels.Get(name)
if uch == nil {
return
}
ch := uc.network.channels.Get(name)
if ch == nil || ch.Detached {
return
}
uch.updateAutoDetach(ch.DetachAfter)
}
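
// updateMonitor reconciles the upstream MONITOR list with the union of the
// targets monitored by connected downstreams, sending MONITOR +/-/C commands
// as needed and dropping cached user info for targets we stop tracking.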
func (uc *upstreamConn) updateMonitor() {
if _, ok := uc.isupport["MONITOR"]; !ok {
return
}
ctx := context.TODO()
add := make(map[string]struct{})
var addList []string
seen := make(map[string]struct{})
uc.forEachDownstream(func(dc *downstreamConn) {
dc.monitored.ForEach(func(target string, _ struct{}) {
targetCM := uc.network.casemap(target)
if targetCM == serviceNickCM {
return
}
if !uc.monitored.Has(targetCM) {
if _, ok := add[targetCM]; !ok {
addList = append(addList, targetCM)
add[targetCM] = struct{}{}
}
} else {
seen[targetCM] = struct{}{}
}
})
})
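// Also monitor the nick we'd like to use, so that we're notified when it
// becomes available and can try to regain it.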
wantNick := database.GetNick(&uc.user.User, &uc.network.Network)
wantNickCM := uc.network.casemap(wantNick)
if _, ok := add[wantNickCM]; !ok && !uc.monitored.Has(wantNick) && !uc.isOurNick(wantNick) && !uc.hasDesiredNick {
addList = append(addList, wantNickCM)
add[wantNickCM] = struct{}{}
}
removeAll := true
var removeList []string
uc.monitored.ForEach(func(nick string, online bool) {
if _, ok := seen[uc.network.casemap(nick)]; ok {
removeAll = false
} else {
removeList = append(removeList, nick)
}
})
// TODO: better handle the case where len(uc.monitored) + len(addList)
// exceeds the limit, probably by immediately sending ERR_MONLISTFULL?
if removeAll && len(addList) == 0 && len(removeList) > 0 {
// Optimization when the last MONITOR-aware downstream disconnects
uc.SendMessage(ctx, &irc.Message{
Command: "MONITOR",
Params: []string{"C"},
})
} else {
msgs := xirc.GenerateMonitor("-", removeList)
msgs = append(msgs, xirc.GenerateMonitor("+", addList)...)
for _, msg := range msgs {
uc.SendMessage(ctx, msg)
}
}
for _, target := range removeList {
uc.monitored.Del(target)
if !uc.shouldCacheUserInfo(target) {
uc.users.Del(target)
}
}
}
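
// stopRegainNickTimer cancels any scheduled attempt to regain the desired
// nick.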
func (uc *upstreamConn) stopRegainNickTimer() {
if uc.regainNickTimer != nil {
uc.regainNickTimer.Stop()
// Maybe we're racing with the timer goroutine, so maybe we'll receive
// an eventTryRegainNick later on, but tryRegainNick handles that case
}
uc.regainNickTimer = nil
uc.regainNickBackoff = nil
}
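
// startRegainNickTimer schedules repeated attempts to regain the desired
// nick with exponential backoff, unless we already own it.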
func (uc *upstreamConn) startRegainNickTimer() {
if uc.regainNickBackoff != nil || uc.regainNickTimer != nil {
panic("startRegainNickTimer called twice")
}
wantNick := database.GetNick(&uc.user.User, &uc.network.Network)
if uc.isOurNick(wantNick) {
return
}
const (
min = 15 * time.Second
max = 10 * time.Minute
jitter = 10 * time.Second
)
uc.regainNickBackoff = newBackoffer(min, max, jitter)
uc.regainNickTimer = time.AfterFunc(uc.regainNickBackoff.Next(), func() {
e := eventTryRegainNick{uc: uc, nick: wantNick}
select {
case uc.network.user.events <- e:
// ok
default:
uc.logger.Printf("skipping nick regain attempt: event queue is full")
}
})
}
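
// tryRegainNick makes a single attempt to regain the desired nick. The
// backoff timer is stopped when the desired nick has changed or we already
// own it; otherwise the timer is re-armed and a NICK command is sent, unless
// a previous attempt is still pending.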
func (uc *upstreamConn) tryRegainNick(nick string) {
ctx := context.TODO()
if uc.regainNickTimer == nil {
return
}
// Maybe the user has updated their desired nick
wantNick := database.GetNick(&uc.user.User, &uc.network.Network)
if wantNick != nick || uc.isOurNick(wantNick) {
uc.stopRegainNickTimer()
return
}
uc.regainNickTimer.Reset(uc.regainNickBackoff.Next())
if uc.pendingRegainNick != "" {
return
}
uc.SendMessage(ctx, &irc.Message{
Command: "NICK",
Params: []string{wantNick},
})
uc.pendingRegainNick = wantNick
}
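
// getCachedWHO answers a WHO/WHOX query from the local user cache. mask is a
// nickname or a channel name, and fields is the set of requested WHOX field
// letters (empty for a plain WHO). ok is false when the cache can't answer,
// e.g. because the mask is unknown, a member is missing some fields, or a
// capability needed to keep a field up to date isn't enabled. As an example,
// a downstream query such as "WHO #chan %cuhnf" can be served from the cache
// as long as every member of #chan has those fields populated.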
func (uc *upstreamConn) getCachedWHO(mask, fields string) (l []*upstreamUser, ok bool) {
// Non-extended WHO fields
if fields == "" {
fields = "cuhsnfdr"
}
// Some extensions are required to keep our cached state in sync. We could
// require setname for 'r' and chghost for 'h'/'s', but servers usually
// implement a QUIT/JOIN fallback, so let's not bother.
// TODO: Avoid storing fields we cannot keep up to date, instead of storing them
// then failing here. eg if we don't have account-notify, avoid storing the ACCOUNT
// in the first place.
if strings.IndexByte(fields, 'a') >= 0 && !uc.caps.IsEnabled("account-notify") {
return nil, false
}
if strings.IndexByte(fields, 'f') >= 0 && !uc.caps.IsEnabled("away-notify") {
return nil, false
}
if uu := uc.users.Get(mask); uu != nil {
if uu.hasWHOXFields(fields) {
return []*upstreamUser{uu}, true
}
} else if uch := uc.channels.Get(mask); uch != nil {
l = make([]*upstreamUser, 0, uch.Members.Len())
ok = true
uch.Members.ForEach(func(nick string, membershipSet *xirc.MembershipSet) {
if !ok {
return
}
uu := uc.users.Get(nick)
if uu == nil || !uu.hasWHOXFields(fields) {
ok = false
} else {
l = append(l, uu)
}
})
if !ok {
return nil, false
}
return l, true
}
return nil, false
}
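
// cacheUserInfo merges info into the cached entry for nick, creating the
// entry if necessary and re-keying it when the nickname has changed.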
func (uc *upstreamConn) cacheUserInfo(nick string, info *upstreamUser) {
if nick == "" {
panic("cacheUserInfo called with empty nickname")
}
uu := uc.users.Get(nick)
if uu == nil {
if info.Nickname != "" {
nick = info.Nickname
} else {
info.Nickname = nick
}
uc.users.Set(info.Nickname, info)
} else {
uu.updateFrom(info)
if info.Nickname != "" && nick != info.Nickname {
uc.users.Del(nick)
uc.users.Set(uu.Nickname, uu)
}
}
}
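
// shouldCacheUserInfo reports whether the user info for nick is worth keeping
// in the cache: our own nick, monitored nicks, and users we share a channel
// with.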
func (uc *upstreamConn) shouldCacheUserInfo(nick string) bool {
if uc.isOurNick(nick) {
return true
}
// Keep the cached user info only if we MONITOR the nick or share a channel with the user
if uc.monitored.Has(nick) {
return true
}
found := false
uc.channels.ForEach(func(_ string, ch *upstreamChannel) {
found = found || ch.Members.Has(nick)
})
return found
}