package soju

import (
	"context"
	"crypto"
	"crypto/sha256"
	"crypto/tls"
	"crypto/x509"
	"encoding/base64"
	"errors"
	"fmt"
	"io"
	"net"
	"strconv"
	"strings"
	"time"

	"github.com/emersion/go-sasl"
	"gopkg.in/irc.v3"

	"git.sr.ht/~emersion/soju/database"
	"git.sr.ht/~emersion/soju/xirc"
)

// permanentUpstreamCaps is the static list of upstream capabilities always
// requested when supported.
var permanentUpstreamCaps = map[string]bool{
	"account-notify":   true,
	"account-tag":      true,
	"away-notify":      true,
	"batch":            true,
	"chghost":          true,
	"extended-join":    true,
	"invite-notify":    true,
	"labeled-response": true,
	"message-tags":     true,
	"multi-prefix":     true,
	"sasl":             true,
	"server-time":      true,
	"setname":          true,

	"draft/account-registration": true,
	"draft/extended-monitor":     true,
}
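
// A minimal sketch of how a map like this is consumed during capability
// negotiation (illustrative only; the real negotiation lives in the CAP
// handling later in this file, and `advertised` is a hypothetical slice of
// capability names taken from CAP LS):
//
//	var req []string
//	for _, name := range advertised {
//		if permanentUpstreamCaps[name] {
//			req = append(req, name)
//		}
//	}
//	// req would then be sent upstream via CAP REQ.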

type registrationError struct {
	*irc.Message
}

func (err registrationError) Error() string {
	return fmt.Sprintf("registration error (%v): %v", err.Command, err.Reason())
}

func (err registrationError) Reason() string {
	if len(err.Params) > 0 {
		return err.Params[len(err.Params)-1]
	}
	return err.Command
}

func (err registrationError) Temporary() bool {
	// Only return false if we're 100% sure that fixing the error requires a
	// network configuration change
	switch err.Command {
	case irc.ERR_PASSWDMISMATCH, irc.ERR_ERRONEUSNICKNAME:
		return false
	case "FAIL":
		return err.Params[1] != "ACCOUNT_REQUIRED"
	default:
		return true
	}
}
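
// Illustrative caller-side sketch: Temporary distinguishes transient
// failures (worth a reconnect) from failures that need a network
// configuration change, so a reconnect loop can bail out early. The backoff
// helper here is hypothetical:
//
//	var regErr registrationError
//	if errors.As(err, &regErr) && !regErr.Temporary() {
//		return err // e.g. wrong server password: retrying won't help
//	}
//	backoff.Wait() // transient failure: try again later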

type upstreamChannel struct {
	Name         string
	conn         *upstreamConn
	Topic        string
	TopicWho     *irc.Prefix
	TopicTime    time.Time
	Status       xirc.ChannelStatus
	modes        channelModes
	creationTime string
	Members      membershipsCasemapMap
	complete     bool
	detachTimer  *time.Timer
}

func (uc *upstreamChannel) updateAutoDetach(dur time.Duration) {
	if uc.detachTimer != nil {
		uc.detachTimer.Stop()
		uc.detachTimer = nil
	}

	if dur == 0 {
		return
	}

	uc.detachTimer = time.AfterFunc(dur, func() {
		uc.conn.network.user.events <- eventChannelDetach{
			uc:   uc.conn,
			name: uc.Name,
		}
	})
}
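
// Usage note (illustrative): call sites re-arm this timer whenever channel
// activity is seen, so eventChannelDetach fires only after `dur` of
// continuous inactivity, e.g.:
//
//	uch.updateAutoDetach(detachAfter) // re-arm on every received message
//
// where detachAfter is assumed to be the per-channel auto-detach duration.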

type upstreamBatch struct {
	Type   string
	Params []string
	Outer  *upstreamBatch // if non-nil, this batch is nested in Outer
	Label  string
}
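
// Outer is resolved from the "batch" tag carried by the inner BATCH command.
// An illustrative wire exchange producing a nested batch (per the IRCv3
// batch extension; names are made up):
//
//	:server BATCH +outer example/outer
//	@batch=outer :server BATCH +inner example/inner
//	@batch=inner :server PRIVMSG #chan :hello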

type pendingUpstreamCommand struct {
	downstreamID uint64
	msg          *irc.Message
	sentAt       time.Time
}

type upstreamConn struct {
	conn

	network *network
	user    *user

	serverPrefix          *irc.Prefix
	serverName            string
	availableUserModes    string
	availableChannelModes map[byte]channelModeType
	availableChannelTypes string
	availableMemberships  []xirc.Membership
	isupport              map[string]*string

	registered  bool
	nick        string
	username    string
	realname    string
	hostname    string
	modes       userModes
	channels    upstreamChannelCasemapMap
	caps        xirc.CapRegistry
	batches     map[string]upstreamBatch
	away        bool
	account     string
	nextLabelID uint64
	monitored   monitorCasemapMap

	saslClient  sasl.Client
	saslStarted bool

	casemapIsSet bool

	// Queue of commands in progress, indexed by type. The first entry has been
	// sent to the server and is awaiting reply. The following entries have not
	// been sent yet.
	pendingCmds map[string][]pendingUpstreamCommand

	pendingRegainNick string
	regainNickTimer   *time.Timer
	regainNickBackoff *backoffer

	gotMotd bool

	hasDesiredNick bool
}

func connectToUpstream(ctx context.Context, network *network) (*upstreamConn, error) {
	logger := &prefixLogger{network.user.logger, fmt.Sprintf("upstream %q: ", network.GetName())}

	ctx, cancel := context.WithTimeout(ctx, connectTimeout)
	defer cancel()

	var dialer net.Dialer

	u, err := network.URL()
	if err != nil {
		return nil, err
	}

	var netConn net.Conn
	switch u.Scheme {
	case "ircs":
		addr := u.Host
		host, _, err := net.SplitHostPort(u.Host)
		if err != nil {
			host = u.Host
			addr = u.Host + ":6697"
		}

		dialer.LocalAddr, err = network.user.localTCPAddrForHost(ctx, host)
		if err != nil {
			return nil, fmt.Errorf("failed to pick local IP for remote host %q: %v", host, err)
		}

		logger.Printf("connecting to TLS server at address %q", addr)

		tlsConfig := &tls.Config{ServerName: host, NextProtos: []string{"irc"}}
		if network.SASL.Mechanism == "EXTERNAL" {
			if network.SASL.External.CertBlob == nil {
				return nil, fmt.Errorf("missing certificate for authentication")
			}
			if network.SASL.External.PrivKeyBlob == nil {
				return nil, fmt.Errorf("missing private key for authentication")
			}
			key, err := x509.ParsePKCS8PrivateKey(network.SASL.External.PrivKeyBlob)
			if err != nil {
				return nil, fmt.Errorf("failed to parse private key: %v", err)
			}
			tlsConfig.Certificates = []tls.Certificate{
				{
					Certificate: [][]byte{network.SASL.External.CertBlob},
					PrivateKey:  key.(crypto.PrivateKey),
				},
			}
			logger.Printf("using TLS client certificate %x", sha256.Sum256(network.SASL.External.CertBlob))
		}

		netConn, err = dialer.DialContext(ctx, "tcp", addr)
		if err != nil {
			return nil, fmt.Errorf("failed to dial %q: %v", addr, err)
		}

		// Don't do the TLS handshake immediately, because we need to register
		// the new connection with identd ASAP. See:
		// https://todo.sr.ht/~emersion/soju/69#event-41859
		netConn = tls.Client(netConn, tlsConfig)
	case "irc+insecure":
		addr := u.Host
		host, _, err := net.SplitHostPort(addr)
		if err != nil {
			host = u.Host
			addr = u.Host + ":6667"
		}

		dialer.LocalAddr, err = network.user.localTCPAddrForHost(ctx, host)
		if err != nil {
			return nil, fmt.Errorf("failed to pick local IP for remote host %q: %v", host, err)
		}

		logger.Printf("connecting to plain-text server at address %q", addr)
		netConn, err = dialer.DialContext(ctx, "tcp", addr)
		if err != nil {
			return nil, fmt.Errorf("failed to dial %q: %v", addr, err)
		}
	case "irc+unix", "unix":
		logger.Printf("connecting to Unix socket at path %q", u.Path)
		netConn, err = dialer.DialContext(ctx, "unix", u.Path)
		if err != nil {
			return nil, fmt.Errorf("failed to connect to Unix socket %q: %v", u.Path, err)
		}
	default:
		return nil, fmt.Errorf("failed to dial %q: unknown scheme: %v", network.Addr, u.Scheme)
	}

	options := connOptions{
		Logger:         logger,
		RateLimitDelay: upstreamMessageDelay,
		RateLimitBurst: upstreamMessageBurst,
	}

	uc := &upstreamConn{
		conn:                  *newConn(network.user.srv, newNetIRCConn(netConn), &options),
		network:               network,
		user:                  network.user,
		channels:              upstreamChannelCasemapMap{newCasemapMap()},
		caps:                  xirc.NewCapRegistry(),
		batches:               make(map[string]upstreamBatch),
		serverPrefix:          &irc.Prefix{Name: "*"},
		availableChannelTypes: stdChannelTypes,
		availableChannelModes: stdChannelModes,
		availableMemberships:  stdMemberships,
		isupport:              make(map[string]*string),
		pendingCmds:           make(map[string][]pendingUpstreamCommand),
		monitored:             monitorCasemapMap{newCasemapMap()},
		hasDesiredNick:        true,
	}
	return uc, nil
}
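
// For reference, the URL schemes handled by the switch above (ports are the
// defaults filled in when the address omits one; hosts are examples):
//
//	ircs://irc.example.org          TLS, default port 6697
//	irc+insecure://irc.example.org  plain text, default port 6667
//	irc+unix:///run/ircd.sock       Unix domain socket (also plain "unix")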

func (uc *upstreamConn) forEachDownstream(f func(*downstreamConn)) {
	uc.network.forEachDownstream(f)
}

func (uc *upstreamConn) forEachDownstreamByID(id uint64, f func(*downstreamConn)) {
	uc.forEachDownstream(func(dc *downstreamConn) {
		if id != 0 && id != dc.id {
			return
		}
		f(dc)
	})
}
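
// Passing id == 0 visits every downstream, so these two calls are
// equivalent:
//
//	uc.forEachDownstreamByID(0, f)
//	uc.forEachDownstream(f)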

func (uc *upstreamConn) downstreamByID(id uint64) *downstreamConn {
	for _, dc := range uc.user.downstreamConns {
		if dc.id == id {
			return dc
		}
	}
	return nil
}

func (uc *upstreamConn) getChannel(name string) (*upstreamChannel, error) {
	ch := uc.channels.Get(name)
	if ch == nil {
		return nil, fmt.Errorf("unknown channel %q", name)
	}
	return ch, nil
}

func (uc *upstreamConn) isChannel(entity string) bool {
	return len(entity) > 0 && strings.ContainsRune(uc.availableChannelTypes, rune(entity[0]))
}
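
// Example (illustrative): if availableChannelTypes contains '#', then
// isChannel("#soju") is true and isChannel("emersion") is false; only the
// first byte of the entity is inspected.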

func (uc *upstreamConn) isOurNick(nick string) bool {
	return uc.network.equalCasemap(uc.nick, nick)
}

func (uc *upstreamConn) abortPendingCommands() {
	for _, l := range uc.pendingCmds {
		for _, pendingCmd := range l {
			dc := uc.downstreamByID(pendingCmd.downstreamID)
			if dc == nil {
				continue
			}

			switch pendingCmd.msg.Command {
			case "LIST":
				dc.SendMessage(&irc.Message{
					Prefix:  dc.srv.prefix(),
					Command: irc.RPL_LISTEND,
					Params:  []string{dc.nick, "Command aborted"},
				})
			case "WHO":
				mask := "*"
				if len(pendingCmd.msg.Params) > 0 {
					mask = pendingCmd.msg.Params[0]
				}
				dc.SendMessage(&irc.Message{
					Prefix:  dc.srv.prefix(),
					Command: irc.RPL_ENDOFWHO,
					Params:  []string{dc.nick, mask, "Command aborted"},
				})
			case "WHOIS":
				nick := pendingCmd.msg.Params[len(pendingCmd.msg.Params)-1]
				dc.SendMessage(&irc.Message{
					Prefix:  dc.srv.prefix(),
					Command: irc.RPL_ENDOFWHOIS,
					Params:  []string{dc.nick, nick, "Command aborted"},
				})
			case "AUTHENTICATE":
				dc.endSASL(&irc.Message{
					Prefix:  dc.srv.prefix(),
					Command: irc.ERR_SASLABORTED,
					Params:  []string{dc.nick, "SASL authentication aborted"},
				})
			case "REGISTER", "VERIFY":
				dc.SendMessage(&irc.Message{
					Prefix:  dc.srv.prefix(),
					Command: "FAIL",
					Params:  []string{pendingCmd.msg.Command, "TEMPORARILY_UNAVAILABLE", pendingCmd.msg.Params[0], "Command aborted"},
				})
			default:
				panic(fmt.Errorf("Unsupported pending command %q", pendingCmd.msg.Command))
			}
		}
	}

	uc.pendingCmds = make(map[string][]pendingUpstreamCommand)
}

func (uc *upstreamConn) sendNextPendingCommand(cmd string) {
	if len(uc.pendingCmds[cmd]) == 0 {
		return
	}
	pendingCmd := &uc.pendingCmds[cmd][0]
	uc.SendMessageLabeled(context.TODO(), pendingCmd.downstreamID, pendingCmd.msg)
	pendingCmd.sentAt = time.Now()
}

func (uc *upstreamConn) enqueueCommand(dc *downstreamConn, msg *irc.Message) {
	switch msg.Command {
	case "LIST", "WHO", "WHOIS", "AUTHENTICATE", "REGISTER", "VERIFY":
		// Supported
	default:
		panic(fmt.Errorf("Unsupported pending command %q", msg.Command))
	}

	uc.pendingCmds[msg.Command] = append(uc.pendingCmds[msg.Command], pendingUpstreamCommand{
		downstreamID: dc.id,
		msg:          msg,
	})

	// If we didn't get a reply after a while, just give up and drop the
	// stale command from the head of the queue
	// TODO: consider sending an abort reply to downstream
	if t := uc.pendingCmds[msg.Command][0].sentAt; !t.IsZero() && time.Since(t) > 30*time.Second {
		copy(uc.pendingCmds[msg.Command], uc.pendingCmds[msg.Command][1:])
		uc.pendingCmds[msg.Command] = uc.pendingCmds[msg.Command][:len(uc.pendingCmds[msg.Command])-1]
	}

	if len(uc.pendingCmds[msg.Command]) == 1 {
		uc.sendNextPendingCommand(msg.Command)
	}
}
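
// Lifecycle sketch (illustrative): a downstream LIST is queued, actually
// sent once it reaches the head of its per-command queue, and dequeued when
// the matching end-of-reply numeric arrives from upstream:
//
//	uc.enqueueCommand(dc, &irc.Message{Command: "LIST"}) // sent immediately if queue was empty
//	// ... upstream replies with RPL_LIST*, ending with RPL_LISTEND ...
//	dc, msg := uc.dequeueCommand("LIST") // pops the head, sends the next queued LIST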

func (uc *upstreamConn) currentPendingCommand(cmd string) (*downstreamConn, *irc.Message) {
	if len(uc.pendingCmds[cmd]) == 0 {
		return nil, nil
	}

	pendingCmd := uc.pendingCmds[cmd][0]
	return uc.downstreamByID(pendingCmd.downstreamID), pendingCmd.msg
}

func (uc *upstreamConn) dequeueCommand(cmd string) (*downstreamConn, *irc.Message) {
	dc, msg := uc.currentPendingCommand(cmd)

	if len(uc.pendingCmds[cmd]) > 0 {
		copy(uc.pendingCmds[cmd], uc.pendingCmds[cmd][1:])
		uc.pendingCmds[cmd] = uc.pendingCmds[cmd][:len(uc.pendingCmds[cmd])-1]
	}

	uc.sendNextPendingCommand(cmd)

	return dc, msg
}
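
// Note: dequeueCommand never blocks. If the queue for cmd is empty,
// currentPendingCommand returns (nil, nil) and the sendNextPendingCommand
// call is a no-op.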

func (uc *upstreamConn) cancelPendingCommandsByDownstreamID(downstreamID uint64) {
	for cmd := range uc.pendingCmds {
		// We can't cancel the currently running command stored in
		// uc.pendingCmds[cmd][0]
		for i := len(uc.pendingCmds[cmd]) - 1; i >= 1; i-- {
			if uc.pendingCmds[cmd][i].downstreamID == downstreamID {
				uc.pendingCmds[cmd] = append(uc.pendingCmds[cmd][:i], uc.pendingCmds[cmd][i+1:]...)
			}
		}
	}
}

func (uc *upstreamConn) parseMembershipPrefix(s string) (ms xirc.MembershipSet, nick string) {
	var memberships xirc.MembershipSet
	i := 0
	for _, m := range uc.availableMemberships {
		if i >= len(s) {
			break
		}
		if s[i] == m.Prefix {
			memberships = append(memberships, m)
			i++
		}
	}
	return memberships, s[i:]
}
|
|
|
|
|

func (uc *upstreamConn) handleMessage(ctx context.Context, msg *irc.Message) error {
	var label string
	if l, ok := msg.GetTag("label"); ok {
		label = l
		delete(msg.Tags, "label")
	}

	var msgBatch *upstreamBatch
	if batchName, ok := msg.GetTag("batch"); ok {
		b, ok := uc.batches[batchName]
		if !ok {
			return fmt.Errorf("unexpected batch reference: batch was not defined: %q", batchName)
		}
		msgBatch = &b
		if label == "" {
			label = msgBatch.Label
		}
		delete(msg.Tags, "batch")
	}
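
	// Labels generated by soju are of the form "sd-<downstreamID>-<offset>",
	// so a labeled reply can be routed back to the exact downstream
	// connection that issued the original command.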
	var downstreamID uint64
	if label != "" {
		var labelOffset uint64
		n, err := fmt.Sscanf(label, "sd-%d-%d", &downstreamID, &labelOffset)
		if err == nil && n < 2 {
			err = errors.New("not enough arguments")
		}
		if err != nil {
			return fmt.Errorf("unexpected message label: invalid downstream reference for label %q: %v", label, err)
		}
	}

	if msg.Prefix == nil {
		msg.Prefix = uc.serverPrefix
	}

	if _, ok := msg.Tags["time"]; !ok && !isNumeric(msg.Command) {
		msg.Tags["time"] = irc.TagValue(xirc.FormatServerTime(time.Now()))
	}

	switch msg.Command {
	case "PING":
		uc.SendMessage(ctx, &irc.Message{
			Command: "PONG",
			Params:  msg.Params,
		})
		return nil
	case "NOTICE", "PRIVMSG", "TAGMSG":
		var target, text string
		if msg.Command != "TAGMSG" {
			if err := parseMessageParams(msg, &target, &text); err != nil {
				return err
			}
		} else {
			if err := parseMessageParams(msg, &target); err != nil {
				return err
			}
		}

		if uc.network.equalCasemap(msg.Prefix.Name, serviceNick) {
			uc.logger.Printf("skipping %v from soju's service: %v", msg.Command, msg)
			break
		}
		if uc.network.equalCasemap(target, serviceNick) {
			uc.logger.Printf("skipping %v to soju's service: %v", msg.Command, msg)
			break
		}

		if !uc.registered || uc.network.equalCasemap(msg.Prefix.Name, uc.serverPrefix.Name) || target == "*" || strings.HasPrefix(target, "$") {
			// This is a server message
			uc.produce("", msg, 0)
			break
		}
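
		// Resolve the buffer this message belongs to: channel messages
		// keep their target, direct messages are filed under the sender's
		// nick, and a draft/channel-context tag can redirect a direct
		// message to a channel buffer.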
		bufferName := target
		if uc.isOurNick(target) {
			bufferName = msg.Prefix.Name
		}
		if t, ok := msg.Tags["+draft/channel-context"]; ok {
			ch := uc.channels.Get(string(t))
			if ch != nil && ch.Members.Has(msg.Prefix.Name) {
				bufferName = ch.Name
			}
		}

		self := uc.isOurNick(msg.Prefix.Name)

Add customizable auto-detaching, auto-reattaching, relaying.
This uses the fields added previously to the Channel struct to implement
the actual detaching/reattaching/relaying logic.
The `FilterDefault` values of the message filters are currently
hardcoded.
The values of the message filters are not currently user-settable.
This introduces a new user event, eventChannelDetach, which stores an
upstreamConn (which might become invalid at the time of processing) and
a channel name, used for auto-detaching. Every time the channel detach
timer is refreshed (by receiving a message, etc.), a new timer is
created on the upstreamChannel, which dispatches this event after the
configured duration (and discards the previous timer, if any).
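
		// For a detached channel, handleDetachedMessage applies the
		// channel's detached-message filters (relaying or reattaching as
		// configured), and any message matching the DetachOn filter
		// refreshes the auto-detach timer.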
		ch := uc.network.channels.Get(bufferName)
		highlight := false
		if ch != nil && msg.Command != "TAGMSG" && !self {
			if ch.Detached {
				uc.handleDetachedMessage(ctx, ch, msg)
			}

			highlight = uc.network.isHighlight(msg)
			if ch.DetachOn == database.FilterMessage || ch.DetachOn == database.FilterDefault || (ch.DetachOn == database.FilterHighlight && highlight) {
				uc.updateChannelAutoDetach(bufferName)
			}
		}

		if highlight || uc.isOurNick(target) {
			go uc.network.broadcastWebPush(msg)
			uc.network.pushTargets.Add(bufferName)
		}

		uc.produce(bufferName, msg, downstreamID)
	case "CAP":
		var subCmd string
		if err := parseMessageParams(msg, nil, &subCmd); err != nil {
			return err
		}
		subCmd = strings.ToUpper(subCmd)
		subParams := msg.Params[2:]
		switch subCmd {
		case "LS":
			if len(subParams) < 1 {
				return newNeedMoreParamsError(msg.Command)
			}
			caps := subParams[len(subParams)-1]
			more := len(subParams) >= 2 && subParams[len(subParams)-2] == "*"

			uc.handleSupportedCaps(caps)

			if more {
				break // wait to receive all capabilities
			}

			uc.updateCaps(ctx)

			if uc.requestSASL() {
				break // we'll send CAP END after authentication is completed
			}

			uc.SendMessage(ctx, &irc.Message{
				Command: "CAP",
				Params:  []string{"END"},
			})
case "ACK", "NAK":
|
|
|
|
if len(subParams) < 1 {
|
|
|
|
return newNeedMoreParamsError(msg.Command)
|
|
|
|
}
|
|
|
|
caps := strings.Fields(subParams[0])
|
|
|
|
|
|
|
|
for _, name := range caps {
|
2022-04-11 15:20:51 +00:00
|
|
|
enable := subCmd == "ACK"
|
|
|
|
if strings.HasPrefix(name, "-") {
|
|
|
|
name = strings.TrimPrefix(name, "-")
|
|
|
|
enable = false
|
|
|
|
}
|
|
|
|
if err := uc.handleCapAck(ctx, strings.ToLower(name), enable); err != nil {
|
2020-03-13 14:12:44 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-30 13:27:41 +00:00
|
|
|

			if uc.registered {
				uc.forEachDownstream(func(dc *downstreamConn) {
					dc.updateSupportedCaps()
				})
			}
		case "NEW":
			if len(subParams) < 1 {
				return newNeedMoreParamsError(msg.Command)
			}
			uc.handleSupportedCaps(subParams[0])
			uc.updateCaps(ctx)
		case "DEL":
			if len(subParams) < 1 {
				return newNeedMoreParamsError(msg.Command)
			}
			caps := strings.Fields(subParams[0])

			for _, c := range caps {
				uc.caps.Del(c)
			}

			if uc.registered {
				uc.forEachDownstream(func(dc *downstreamConn) {
					dc.updateSupportedCaps()
				})
			}
		default:
			uc.logger.Printf("unhandled message: %v", msg)
		}
case "AUTHENTICATE":
|
|
|
|
if uc.saslClient == nil {
|
|
|
|
return fmt.Errorf("received unexpected AUTHENTICATE message")
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO: if a challenge is 400 bytes long, buffer it
|
|
|
|
var challengeStr string
|
|
|
|
if err := parseMessageParams(msg, &challengeStr); err != nil {
|
2021-12-08 17:03:40 +00:00
|
|
|
uc.SendMessage(ctx, &irc.Message{
|
2020-03-13 14:12:44 +00:00
|
|
|
Command: "AUTHENTICATE",
|
|
|
|
Params: []string{"*"},
|
|
|
|
})
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|

		var challenge []byte
		if challengeStr != "+" {
			var err error
			challenge, err = base64.StdEncoding.DecodeString(challengeStr)
			if err != nil {
				uc.SendMessage(ctx, &irc.Message{
					Command: "AUTHENTICATE",
					Params:  []string{"*"},
				})
				return err
			}
		}

		var resp []byte
		var err error
		if !uc.saslStarted {
			_, resp, err = uc.saslClient.Start()
			uc.saslStarted = true
		} else {
			resp, err = uc.saslClient.Next(challenge)
		}
		if err != nil {
			uc.SendMessage(ctx, &irc.Message{
				Command: "AUTHENTICATE",
				Params:  []string{"*"},
			})
			return err
		}
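		// xirc.GenerateSASL splits the base64-encoded response into
		// AUTHENTICATE messages of at most 400 bytes each, as the IRCv3
		// SASL extension requires.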
		for _, msg := range xirc.GenerateSASL(resp) {
			uc.SendMessage(ctx, msg)
		}
	case irc.RPL_LOGGEDIN:
		var rawPrefix string
		if err := parseMessageParams(msg, nil, &rawPrefix, &uc.account); err != nil {
			return err
		}

		prefix := irc.ParsePrefix(rawPrefix)
		uc.username = prefix.User
		uc.hostname = prefix.Host

		uc.logger.Printf("logged in with account %q", uc.account)
		uc.forEachDownstream(func(dc *downstreamConn) {
			dc.updateAccount()
			dc.updateHost()
		})
	case irc.RPL_LOGGEDOUT:
		var rawPrefix string
		if err := parseMessageParams(msg, nil, &rawPrefix); err != nil {
			return err
		}

		uc.account = ""

		prefix := irc.ParsePrefix(rawPrefix)
		uc.username = prefix.User
		uc.hostname = prefix.Host

		uc.logger.Printf("logged out")
		uc.forEachDownstream(func(dc *downstreamConn) {
			dc.updateAccount()
			dc.updateHost()
		})
	case xirc.RPL_VISIBLEHOST:
		var rawHost string
		if err := parseMessageParams(msg, nil, &rawHost); err != nil {
			return err
		}

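		// The visible host parameter is either "<user>@<host>" or just
		// "<host>".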
		parts := strings.SplitN(rawHost, "@", 2)
		if len(parts) == 2 {
			uc.username, uc.hostname = parts[0], parts[1]
		} else {
			uc.hostname = rawHost
		}

		uc.forEachDownstream(func(dc *downstreamConn) {
			dc.updateHost()
		})
	case irc.ERR_NICKLOCKED, irc.RPL_SASLSUCCESS, irc.ERR_SASLFAIL, irc.ERR_SASLTOOLONG, irc.ERR_SASLABORTED:
		var info string
		if err := parseMessageParams(msg, nil, &info); err != nil {
			return err
		}
		switch msg.Command {
		case irc.ERR_NICKLOCKED:
			uc.logger.Printf("invalid nick used with SASL authentication: %v", info)
		case irc.ERR_SASLFAIL:
			uc.logger.Printf("SASL authentication failed: %v", info)
		case irc.ERR_SASLTOOLONG:
			uc.logger.Printf("SASL message too long: %v", info)
		}

		uc.saslClient = nil
		uc.saslStarted = false
if dc, _ := uc.dequeueCommand("AUTHENTICATE"); dc != nil && dc.sasl != nil {
|
|
|
|
if msg.Command == irc.RPL_SASLSUCCESS {
|
2021-12-02 22:27:12 +00:00
|
|
|
uc.network.autoSaveSASLPlain(ctx, dc.sasl.plainUsername, dc.sasl.plainPassword)
|
2021-11-21 15:10:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
dc.endSASL(msg)
|
|
|
|
}
|
|
|
|
|
|
|
|
if !uc.registered {
|
2021-12-08 17:03:40 +00:00
|
|
|
uc.SendMessage(ctx, &irc.Message{
|
2021-11-21 15:10:54 +00:00
|
|
|
Command: "CAP",
|
|
|
|
Params: []string{"END"},
|
|
|
|
})
|
|
|
|
}
|
2021-11-30 10:54:11 +00:00
|
|
|
case "REGISTER", "VERIFY":
|
|
|
|
if dc, cmd := uc.dequeueCommand(msg.Command); dc != nil {
|
|
|
|
if msg.Command == "REGISTER" {
|
|
|
|
var account, password string
|
|
|
|
if err := parseMessageParams(msg, nil, &account); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := parseMessageParams(cmd, nil, nil, &password); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2021-12-02 22:27:12 +00:00
|
|
|
uc.network.autoSaveSASLPlain(ctx, account, password)
|
2021-11-30 10:54:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
dc.SendMessage(msg)
|
|
|
|
}
|
2020-02-06 15:39:09 +00:00
|
|
|
	case irc.RPL_WELCOME:
		if err := parseMessageParams(msg, &uc.nick); err != nil {
			return err
		}

		uc.registered = true
		uc.serverPrefix = msg.Prefix
		uc.logger.Printf("connection registered with nick %q", uc.nick)
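
		// Auto-join all saved channels now that registration is complete.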
		if uc.network.channels.Len() > 0 {
			var channels, keys []string
			uc.network.channels.ForEach(func(ch *database.Channel) {
				channels = append(channels, ch.Name)
				keys = append(keys, ch.Key)
			})

			for _, msg := range xirc.GenerateJoin(channels, keys) {
				uc.SendMessage(ctx, msg)
			}
		}
	case irc.RPL_MYINFO:
		if err := parseMessageParams(msg, nil, &uc.serverName, nil, &uc.availableUserModes, nil); err != nil {
			return err
		}
	case irc.RPL_ISUPPORT:
		if err := parseMessageParams(msg, nil, nil); err != nil {
			return err
		}

		var downstreamIsupport []string

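		// Each ISUPPORT token is either "NAME", "NAME=value", or "-NAME"
		// (a negation withdrawing a previously advertised token).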
		for _, token := range msg.Params[1 : len(msg.Params)-1] {
			parameter := token
			var negate, hasValue bool
			var value string
			if strings.HasPrefix(token, "-") {
				negate = true
				parameter = token[1:]
			} else if i := strings.IndexByte(token, '='); i >= 0 {
				parameter = token[:i]
				value = token[i+1:]
				hasValue = true
			}

			if hasValue {
				uc.isupport[parameter] = &value
			} else if !negate {
				uc.isupport[parameter] = nil
			} else {
				delete(uc.isupport, parameter)
			}

			var err error
			switch parameter {
case "CASEMAPPING":
|
|
|
|
casemap, ok := parseCasemappingToken(value)
|
|
|
|
if !ok {
|
|
|
|
casemap = casemapRFC1459
|
|
|
|
}
|
|
|
|
uc.network.updateCasemapping(casemap)
|
|
|
|
uc.casemapIsSet = true
|
2021-03-15 22:11:42 +00:00
|
|
|
case "CHANMODES":
|
|
|
|
if !negate {
|
|
|
|
err = uc.handleChanModes(value)
|
|
|
|
} else {
|
|
|
|
uc.availableChannelModes = stdChannelModes
|
|
|
|
}
|
|
|
|
case "CHANTYPES":
|
|
|
|
if !negate {
|
2020-03-20 23:48:19 +00:00
|
|
|
uc.availableChannelTypes = value
|
2021-03-15 22:11:42 +00:00
|
|
|
} else {
|
|
|
|
uc.availableChannelTypes = stdChannelTypes
|
|
|
|
}
|
|
|
|
case "PREFIX":
|
|
|
|
if !negate {
|
|
|
|
err = uc.handleMemberships(value)
|
|
|
|
} else {
|
|
|
|
uc.availableMemberships = stdMemberships
|
2020-03-20 23:48:19 +00:00
|
|
|
}
|
|
|
|
}
|
2021-03-15 22:11:42 +00:00
|
|
|

			if err != nil {
				return err
			}

			if passthroughIsupport[parameter] {
				downstreamIsupport = append(downstreamIsupport, token)
			}
		}

		uc.updateMonitor()

		uc.forEachDownstream(func(dc *downstreamConn) {
			if dc.network == nil {
				return
			}
			msgs := xirc.GenerateIsupport(dc.srv.prefix(), dc.nick, downstreamIsupport)
			for _, msg := range msgs {
				dc.SendMessage(msg)
			}
		})
	case irc.ERR_NOMOTD, irc.RPL_ENDOFMOTD:
		if !uc.casemapIsSet {
			// upstream did not send any CASEMAPPING token, thus
			// we assume it implements the old RFCs with rfc1459.
			uc.casemapIsSet = true
			uc.network.updateCasemapping(casemapRFC1459)
		}

		if !uc.gotMotd {
			// Ignore the initial MOTD upon connection, but forward
			// subsequent MOTD messages downstream
			uc.gotMotd = true

			// If the server doesn't support MONITOR, periodically try to
			// regain our desired nick
			if _, ok := uc.isupport["MONITOR"]; !ok {
				uc.startRegainNickTimer()
			}

			return nil
		}

		uc.forEachDownstreamByID(downstreamID, func(dc *downstreamConn) {
			dc.SendMessage(&irc.Message{
				Prefix:  uc.srv.prefix(),
				Command: msg.Command,
				Params:  msg.Params,
			})
		})
case "BATCH":
|
|
|
|
var tag string
|
|
|
|
if err := parseMessageParams(msg, &tag); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
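		// A "+" prefix opens a new batch and "-" closes it; nested
		// batches reference their parent through Outer.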
if strings.HasPrefix(tag, "+") {
|
|
|
|
tag = tag[1:]
|
|
|
|
if _, ok := uc.batches[tag]; ok {
|
|
|
|
return fmt.Errorf("unexpected BATCH reference tag: batch was already defined: %q", tag)
|
|
|
|
}
|
|
|
|
var batchType string
|
|
|
|
if err := parseMessageParams(msg, nil, &batchType); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
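			// Shadow label: the batch inherits the enclosing batch's
			// label when this BATCH message itself carried none, without
			// clobbering the outer variable.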
			label := label
			if label == "" && msgBatch != nil {
				label = msgBatch.Label
			}
			uc.batches[tag] = upstreamBatch{
				Type:   batchType,
				Params: msg.Params[2:],
				Outer:  msgBatch,
				Label:  label,
			}
		} else if strings.HasPrefix(tag, "-") {
			tag = tag[1:]
			if _, ok := uc.batches[tag]; !ok {
				return fmt.Errorf("unknown BATCH reference tag: %q", tag)
			}
			delete(uc.batches, tag)
		} else {
			return fmt.Errorf("unexpected BATCH reference tag: missing +/- prefix: %q", tag)
		}
case "NICK":
|
2020-02-07 11:36:02 +00:00
|
|
|
var newNick string
|
|
|
|
if err := parseMessageParams(msg, &newNick); err != nil {
|
|
|
|
return err
|
2020-02-07 11:19:42 +00:00
|
|
|
}
|
|
|
|
|
2020-04-07 13:01:11 +00:00
|
|
|
me := false
|
		if uc.isOurNick(msg.Prefix.Name) {
			uc.logger.Printf("changed nick from %q to %q", uc.nick, newNick)
			me = true
			uc.nick = newNick

			if uc.network.equalCasemap(uc.pendingRegainNick, newNick) {
				uc.pendingRegainNick = ""
				uc.stopRegainNickTimer()
			}
			wantNick := database.GetNick(&uc.user.User, &uc.network.Network)
			if uc.network.equalCasemap(wantNick, newNick) {
				uc.hasDesiredNick = true
			}
		}

		uc.channels.ForEach(func(ch *upstreamChannel) {
			memberships := ch.Members.Get(msg.Prefix.Name)
			if memberships != nil {
				ch.Members.Del(msg.Prefix.Name)
				ch.Members.Set(newNick, memberships)
				uc.appendLog(ch.Name, msg)
			}
		})

		if !me {
			uc.forEachDownstream(func(dc *downstreamConn) {
				dc.SendMessage(dc.marshalMessage(msg, uc.network))
			})
		} else {
			uc.forEachDownstream(func(dc *downstreamConn) {
				dc.updateNick()
			})
			uc.updateMonitor()
		}
case "SETNAME":
|
|
|
|
var newRealname string
|
|
|
|
if err := parseMessageParams(msg, &newRealname); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO: consider appending this message to logs
|
|
|
|
|
|
|
|
if uc.isOurNick(msg.Prefix.Name) {
|
|
|
|
uc.logger.Printf("changed realname from %q to %q", uc.realname, newRealname)
|
|
|
|
uc.realname = newRealname
|
|
|
|
|
|
|
|
uc.forEachDownstream(func(dc *downstreamConn) {
|
|
|
|
dc.updateRealname()
|
|
|
|
})
|
|
|
|
} else {
|
|
|
|
uc.forEachDownstream(func(dc *downstreamConn) {
|
|
|
|
dc.SendMessage(dc.marshalMessage(msg, uc.network))
|
|
|
|
})
|
|
|
|
}
|
2022-03-21 15:30:58 +00:00
|
|
|
case "CHGHOST":
|
|
|
|
var newUsername, newHostname string
|
|
|
|
if err := parseMessageParams(msg, &newUsername, &newHostname); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
newPrefix := &irc.Prefix{
|
|
|
|
Name: uc.nick,
|
|
|
|
User: newUsername,
|
|
|
|
Host: newHostname,
|
|
|
|
}
|
|
|
|
|
|
|
|
if uc.isOurNick(msg.Prefix.Name) {
|
|
|
|
uc.logger.Printf("changed prefix from %q to %q", msg.Prefix.Host, newPrefix)
|
|
|
|
uc.username = newUsername
|
|
|
|
uc.hostname = newHostname
|
|
|
|
|
|
|
|
uc.forEachDownstream(func(dc *downstreamConn) {
|
|
|
|
dc.updateHost()
|
|
|
|
})
|
|
|
|
} else {
|
|
|
|
uc.forEachDownstream(func(dc *downstreamConn) {
|
|
|
|
// TODO: add fallback with QUIT/JOIN/MODE messages
|
|
|
|
dc.SendMessage(dc.marshalMessage(msg, uc.network))
|
|
|
|
})
|
|
|
|
}
|
2020-02-06 18:22:04 +00:00
|
|
|
case "JOIN":
|
2020-02-07 11:36:02 +00:00
|
|
|
var channels string
|
|
|
|
if err := parseMessageParams(msg, &channels); err != nil {
|
|
|
|
return err
|
2020-02-06 18:22:04 +00:00
|
|
|
}
|
2020-02-07 09:54:03 +00:00
|
|
|
|
2020-02-07 11:36:02 +00:00
|
|
|
for _, ch := range strings.Split(channels, ",") {
|
			if uc.isOurNick(msg.Prefix.Name) {
				uc.logger.Printf("joined channel %q", ch)
				members := membershipsCasemapMap{newCasemapMap()}
				members.casemap = uc.network.casemap
				uc.channels.Set(&upstreamChannel{
					Name:    ch,
					conn:    uc,
					Members: members,
				})
				uc.updateChannelAutoDetach(ch)

				uc.SendMessage(ctx, &irc.Message{
					Command: "MODE",
					Params:  []string{ch},
				})
			} else {
				ch, err := uc.getChannel(ch)
				if err != nil {
					return err
				}
				ch.Members.Set(msg.Prefix.Name, &xirc.MembershipSet{})
			}

			chMsg := msg.Copy()
			chMsg.Params[0] = ch
			uc.produce(ch, chMsg, 0)
		}
case "PART":
|
2020-02-07 11:36:02 +00:00
|
|
|
var channels string
|
|
|
|
if err := parseMessageParams(msg, &channels); err != nil {
|
|
|
|
return err
|
2020-02-07 09:54:03 +00:00
|
|
|
}
|
|
|
|
|
2020-02-07 11:36:02 +00:00
|
|
|
for _, ch := range strings.Split(channels, ",") {
|
			if uc.isOurNick(msg.Prefix.Name) {
				uc.logger.Printf("parted channel %q", ch)
				if uch := uc.channels.Get(ch); uch != nil {
					uc.channels.Del(ch)
					uch.updateAutoDetach(0)
				}
			} else {
				ch, err := uc.getChannel(ch)
				if err != nil {
					return err
				}
				ch.Members.Del(msg.Prefix.Name)
			}

			chMsg := msg.Copy()
			chMsg.Params[0] = ch
			uc.produce(ch, chMsg, 0)
		}
case "KICK":
|
|
|
|
var channel, user string
|
|
|
|
if err := parseMessageParams(msg, &channel, &user); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
		if uc.isOurNick(user) {
			uc.logger.Printf("kicked from channel %q by %s", channel, msg.Prefix.Name)
			uc.channels.Del(channel)
		} else {
			ch, err := uc.getChannel(channel)
			if err != nil {
				return err
			}
			ch.Members.Del(user)
		}

		uc.produce(channel, msg, 0)
case "QUIT":
|
		if uc.isOurNick(msg.Prefix.Name) {
			uc.logger.Printf("quit")
		}

		uc.channels.ForEach(func(ch *upstreamChannel) {
			if ch.Members.Has(msg.Prefix.Name) {
				ch.Members.Del(msg.Prefix.Name)
				uc.appendLog(ch.Name, msg)
			}
		})

		if msg.Prefix.Name != uc.nick {
			uc.forEachDownstream(func(dc *downstreamConn) {
				dc.SendMessage(dc.marshalMessage(msg, uc.network))
			})
		}
	case irc.RPL_TOPIC, irc.RPL_NOTOPIC:
		var name, topic string
		if err := parseMessageParams(msg, nil, &name, &topic); err != nil {
			return err
		}
		ch, err := uc.getChannel(name)
		if err != nil {
			return err
		}
		if msg.Command == irc.RPL_TOPIC {
			ch.Topic = topic
		} else {
			ch.Topic = ""
		}
case "TOPIC":
|
2020-02-07 11:36:02 +00:00
|
|
|
var name string
|
2020-03-04 14:45:14 +00:00
|
|
|
if err := parseMessageParams(msg, &name); err != nil {
|
2020-02-07 11:36:02 +00:00
|
|
|
return err
|
2020-02-06 18:22:04 +00:00
|
|
|
}
|
2020-02-17 11:36:42 +00:00
|
|
|
ch, err := uc.getChannel(name)
|
2020-02-06 18:22:04 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if len(msg.Params) > 1 {
|
|
|
|
ch.Topic = msg.Params[1]
|
2020-08-20 08:39:23 +00:00
|
|
|
ch.TopicWho = msg.Prefix.Copy()
|
|
|
|
ch.TopicTime = time.Now() // TODO use msg.Tags["time"]
|
2020-02-06 18:22:04 +00:00
|
|
|
} else {
|
|
|
|
ch.Topic = ""
|
|
|
|
}
|
2022-04-10 16:05:12 +00:00
|
|
|
uc.produce(ch.Name, msg, 0)
|
2020-03-20 23:48:19 +00:00
|
|
|
case "MODE":
|
|
|
|
var name, modeStr string
|
|
|
|
if err := parseMessageParams(msg, &name, &modeStr); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if !uc.isChannel(name) { // user mode change
|
|
|
|
if name != uc.nick {
|
|
|
|
return fmt.Errorf("received MODE message for unknown nick %q", name)
|
|
|
|
}
|
2021-06-09 19:58:27 +00:00
|
|
|
|
|
|
|
if err := uc.modes.Apply(modeStr); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
uc.forEachDownstream(func(dc *downstreamConn) {
|
|
|
|
if dc.upstream() == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
dc.SendMessage(msg)
|
|
|
|
})
|
2020-03-20 23:48:19 +00:00
|
|
|
} else { // channel mode change
|
|
|
|
ch, err := uc.getChannel(name)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-04-30 21:42:33 +00:00
|
|
|
needMarshaling, err := applyChannelModes(ch, modeStr, msg.Params[2:])
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2020-03-20 23:48:19 +00:00
|
|
|
}
|
|
|
|
|
2020-04-30 21:42:33 +00:00
|
|
|
uc.appendLog(ch.Name, msg)
|
2020-06-12 12:35:26 +00:00
|
|
|
|
2022-06-06 07:58:39 +00:00
|
|
|
c := uc.network.channels.Get(name)
|
Implement casemapping
2021-03-16 09:00:34 +00:00
|
|
|
if c == nil || !c.Detached {
|
2020-06-12 12:35:26 +00:00
|
|
|
uc.forEachDownstream(func(dc *downstreamConn) {
|
|
|
|
params := make([]string, len(msg.Params))
|
|
|
|
params[0] = dc.marshalEntity(uc.network, name)
|
|
|
|
params[1] = modeStr
|
|
|
|
|
|
|
|
copy(params[2:], msg.Params[2:])
|
|
|
|
for i, modeParam := range params[2:] {
|
|
|
|
if _, ok := needMarshaling[i]; ok {
|
|
|
|
params[2+i] = dc.marshalEntity(uc.network, modeParam)
|
|
|
|
}
|
2020-04-30 21:42:33 +00:00
|
|
|
}
|
|
|
|
|
2020-06-12 12:35:26 +00:00
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.marshalUserPrefix(uc.network, msg.Prefix),
|
|
|
|
Command: "MODE",
|
|
|
|
Params: params,
|
|
|
|
})
|
2020-04-30 21:42:33 +00:00
|
|
|
})
|
2020-06-12 12:35:26 +00:00
|
|
|
}
|
2020-03-20 23:48:19 +00:00
|
|
|
}
|
|
|
|
case irc.RPL_UMODEIS:
|
|
|
|
if err := parseMessageParams(msg, nil); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
modeStr := ""
|
|
|
|
if len(msg.Params) > 1 {
|
|
|
|
modeStr = msg.Params[1]
|
|
|
|
}
|
|
|
|
|
|
|
|
uc.modes = ""
|
|
|
|
if err := uc.modes.Apply(modeStr); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2021-06-09 19:58:27 +00:00
|
|
|
|
|
|
|
uc.forEachDownstream(func(dc *downstreamConn) {
|
|
|
|
if dc.upstream() == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
dc.SendMessage(msg)
|
|
|
|
})
|
2020-03-20 23:48:19 +00:00
|
|
|
case irc.RPL_CHANNELMODEIS:
|
|
|
|
var channel string
|
|
|
|
if err := parseMessageParams(msg, nil, &channel); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
modeStr := ""
|
|
|
|
if len(msg.Params) > 2 {
|
|
|
|
modeStr = msg.Params[2]
|
|
|
|
}
|
|
|
|
|
|
|
|
ch, err := uc.getChannel(channel)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
firstMode := ch.modes == nil
|
|
|
|
ch.modes = make(map[byte]string)
|
2020-04-30 21:42:33 +00:00
|
|
|
if _, err := applyChannelModes(ch, modeStr, msg.Params[3:]); err != nil {
|
2020-03-20 23:48:19 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2022-06-06 07:58:39 +00:00
|
|
|
c := uc.network.channels.Get(channel)
|
2021-12-09 11:12:20 +00:00
|
|
|
if firstMode && (c == nil || !c.Detached) {
|
|
|
|
modeStr, modeParams := ch.modes.Format()
|
2020-06-12 12:35:26 +00:00
|
|
|
|
2021-12-09 11:12:20 +00:00
|
|
|
uc.forEachDownstream(func(dc *downstreamConn) {
|
|
|
|
params := []string{dc.nick, dc.marshalEntity(uc.network, channel), modeStr}
|
|
|
|
params = append(params, modeParams...)
|
|
|
|
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
|
|
|
Command: irc.RPL_CHANNELMODEIS,
|
|
|
|
Params: params,
|
2020-03-20 23:48:19 +00:00
|
|
|
})
|
2021-12-09 11:12:20 +00:00
|
|
|
})
|
2020-03-20 23:48:19 +00:00
|
|
|
}
|
2022-05-09 15:18:51 +00:00
|
|
|
case xirc.RPL_CREATIONTIME:
|
2020-03-26 04:51:47 +00:00
|
|
|
var channel, creationTime string
|
|
|
|
if err := parseMessageParams(msg, nil, &channel, &creationTime); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
ch, err := uc.getChannel(channel)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
firstCreationTime := ch.creationTime == ""
|
|
|
|
ch.creationTime = creationTime
|
2021-12-09 11:12:20 +00:00
|
|
|
|
2022-06-06 07:58:39 +00:00
|
|
|
c := uc.network.channels.Get(channel)
|
2021-12-09 11:12:20 +00:00
|
|
|
if firstCreationTime && (c == nil || !c.Detached) {
|
2020-03-26 04:51:47 +00:00
|
|
|
uc.forEachDownstream(func(dc *downstreamConn) {
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
2022-05-09 15:18:51 +00:00
|
|
|
Command: xirc.RPL_CREATIONTIME,
|
2020-08-20 07:15:47 +00:00
|
|
|
Params: []string{dc.nick, dc.marshalEntity(uc.network, ch.Name), creationTime},
|
2020-03-26 04:51:47 +00:00
|
|
|
})
|
|
|
|
})
|
|
|
|
}
|
2022-05-09 15:18:51 +00:00
|
|
|
case xirc.RPL_TOPICWHOTIME:
|
2021-12-09 11:12:20 +00:00
|
|
|
var channel, who, timeStr string
|
|
|
|
if err := parseMessageParams(msg, nil, &channel, &who, &timeStr); err != nil {
|
2020-02-07 11:36:02 +00:00
|
|
|
return err
|
2020-02-06 18:22:04 +00:00
|
|
|
}
|
2021-12-09 11:12:20 +00:00
|
|
|
|
|
|
|
ch, err := uc.getChannel(channel)
|
2020-02-06 18:22:04 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2021-12-09 11:12:20 +00:00
|
|
|
|
2020-08-20 08:39:23 +00:00
|
|
|
firstTopicWhoTime := ch.TopicWho == nil
|
|
|
|
ch.TopicWho = irc.ParsePrefix(who)
|
2020-02-07 11:36:02 +00:00
|
|
|
sec, err := strconv.ParseInt(timeStr, 10, 64)
|
2020-02-06 18:22:04 +00:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("failed to parse topic time: %v", err)
|
|
|
|
}
|
|
|
|
ch.TopicTime = time.Unix(sec, 0)
|
2021-12-09 11:12:20 +00:00
|
|
|
|
2022-06-06 07:58:39 +00:00
|
|
|
c := uc.network.channels.Get(channel)
|
2021-12-09 11:12:20 +00:00
|
|
|
if firstTopicWhoTime && (c == nil || !c.Detached) {
|
2020-08-20 08:39:23 +00:00
|
|
|
uc.forEachDownstream(func(dc *downstreamConn) {
|
|
|
|
topicWho := dc.marshalUserPrefix(uc.network, ch.TopicWho)
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
2022-05-09 15:18:51 +00:00
|
|
|
Command: xirc.RPL_TOPICWHOTIME,
|
2020-08-20 08:39:23 +00:00
|
|
|
Params: []string{
|
|
|
|
dc.nick,
|
|
|
|
dc.marshalEntity(uc.network, ch.Name),
|
|
|
|
topicWho.String(),
|
|
|
|
timeStr,
|
|
|
|
},
|
|
|
|
})
|
|
|
|
})
|
|
|
|
}
|
Add LIST support
This commit adds support for downstream LIST messages from multiple
concurrent downstreams to multiple concurrent upstreams, including
support for multiple pending LIST requests from the same downstream.
Because a unique RPL_LISTEND message must be sent to the requesting
downstream, and there might be multiple upstreams, each sending their
own RPL_LISTEND, a cache of RPL_LISTEND replies of some sort is
required to match RPL_LISTEND replies together in order to only send
one back downstream.
This commit adds a list of "pending LIST" structs, each of which
contains a map of all upstreams that still need to send an RPL_LISTEND,
and the corresponding LIST request associated with that response. This
list of pending LISTs is sorted according to the order in which the
requesting downstreams sent the LIST messages. Each pending set also
stores the id of the requesting downstream, in order to only forward
the replies to it and to no other downstream. (This is important
because LIST replies can typically amount to several thousand messages
on large servers.)
When a single downstream makes multiple LIST requests, only the first
one is immediately sent to the upstream servers. The next ones are
buffered until the first one is completed. Distinct downstreams can
make concurrent LIST requests without any request buffering.
Each RPL_LIST message is forwarded to the downstream of the first
matching pending LIST struct.
When an upstream sends an RPL_LISTEND message, the upstream is removed
from the first matching pending LIST struct, but that message is not
immediately forwarded downstream. If that struct's pending set is then
empty, all upstreams have sent back all their RPL_LISTEND replies
(which means they also sent all their RPL_LIST replies), so a unique
RPL_LISTEND is sent to the downstream and that pending LIST struct is
removed from the cache.
Upstreams are removed from the pending LIST structs in two other cases:
- when they are closed (to avoid stalling because of a disconnected
upstream that will never reply to the LIST message): they are removed
from all pending LIST structs
- when they reply with an ERR_UNKNOWNCOMMAND or RPL_TRYAGAIN LIST
reply, which is typically sent when a user is not allowed to LIST
because they just joined the server: they are removed from the first
pending LIST struct, as if an RPL_LISTEND message had been received
2020-03-26 01:40:30 +00:00
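To make the bookkeeping concrete, here is a sketch of what such a
pending LIST entry could look like (field names and types are
assumptions for illustration, not necessarily soju's exact definitions):

type pendingLIST struct {
	// ID of the downstream that sent the LIST request; replies are
	// forwarded to it and to no other downstream.
	downstreamID uint64
	// Upstreams (keyed by network ID) that still owe an RPL_LISTEND,
	// mapped to the LIST request associated with that response. Once
	// this map is empty, a single RPL_LISTEND is sent downstream and
	// the struct is dropped from the cache.
	pendingCommands map[int64]*irc.Message
}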
|
|
|
case irc.RPL_LIST:
|
|
|
|
var channel, clients, topic string
|
|
|
|
if err := parseMessageParams(msg, nil, &channel, &clients, &topic); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2021-11-09 21:09:17 +00:00
|
|
|
dc, cmd := uc.currentPendingCommand("LIST")
|
2021-11-09 20:32:26 +00:00
|
|
|
if cmd == nil {
|
Add LIST support
2020-03-26 01:40:30 +00:00
|
|
|
return fmt.Errorf("unexpected RPL_LIST: no matching pending LIST")
|
2021-11-09 20:32:26 +00:00
|
|
|
} else if dc == nil {
|
|
|
|
return nil
|
Add LIST support
2020-03-26 01:40:30 +00:00
|
|
|
}
|
|
|
|
|
2021-11-09 20:32:26 +00:00
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
|
|
|
Command: irc.RPL_LIST,
|
|
|
|
Params: []string{dc.nick, dc.marshalEntity(uc.network, channel), clients, topic},
|
Add LIST support
2020-03-26 01:40:30 +00:00
|
|
|
})
|
|
|
|
case irc.RPL_LISTEND:
|
2021-11-09 21:09:17 +00:00
|
|
|
dc, cmd := uc.dequeueCommand("LIST")
|
2021-11-09 20:32:26 +00:00
|
|
|
if cmd == nil {
|
Add LIST support
2020-03-26 01:40:30 +00:00
|
|
|
return fmt.Errorf("unexpected RPL_LISTEND: no matching pending LIST")
|
2021-11-09 20:32:26 +00:00
|
|
|
} else if dc == nil {
|
|
|
|
return nil
|
Add LIST support
2020-03-26 01:40:30 +00:00
|
|
|
}
|
2021-11-09 20:32:26 +00:00
|
|
|
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
|
|
|
Command: irc.RPL_LISTEND,
|
|
|
|
Params: []string{dc.nick, "End of /LIST"},
|
|
|
|
})
|
2020-02-06 18:22:04 +00:00
|
|
|
case irc.RPL_NAMREPLY:
|
2020-02-07 11:36:02 +00:00
|
|
|
var name, statusStr, members string
|
|
|
|
if err := parseMessageParams(msg, nil, &statusStr, &name, &members); err != nil {
|
|
|
|
return err
|
2020-02-06 18:22:04 +00:00
|
|
|
}
|
2020-03-21 00:24:29 +00:00
|
|
|
|
2022-06-06 07:58:39 +00:00
|
|
|
ch := uc.channels.Get(name)
|
Implement casemapping
2021-03-16 09:00:34 +00:00
|
|
|
if ch == nil {
|
2020-03-21 00:24:29 +00:00
|
|
|
// NAMES on a channel we have not joined, forward to downstream
|
2020-03-26 01:39:04 +00:00
|
|
|
uc.forEachDownstreamByID(downstreamID, func(dc *downstreamConn) {
|
2020-04-16 15:19:00 +00:00
|
|
|
channel := dc.marshalEntity(uc.network, name)
|
2020-03-27 19:09:38 +00:00
|
|
|
members := splitSpace(members)
|
2020-03-21 00:24:29 +00:00
|
|
|
for i, member := range members {
|
2020-04-30 21:39:59 +00:00
|
|
|
memberships, nick := uc.parseMembershipPrefix(member)
|
2022-05-30 07:12:28 +00:00
|
|
|
members[i] = formatMemberPrefix(memberships, dc) + dc.marshalEntity(uc.network, nick)
|
2020-03-21 00:24:29 +00:00
|
|
|
}
|
|
|
|
memberStr := strings.Join(members, " ")
|
|
|
|
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
|
|
|
Command: irc.RPL_NAMREPLY,
|
|
|
|
Params: []string{dc.nick, statusStr, channel, memberStr},
|
|
|
|
})
|
|
|
|
})
|
|
|
|
return nil
|
2020-02-06 18:22:04 +00:00
|
|
|
}
|
|
|
|
|
2022-05-29 15:28:25 +00:00
|
|
|
status, err := xirc.ParseChannelStatus(statusStr)
|
2020-02-06 18:22:04 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
ch.Status = status
|
|
|
|
|
2020-03-27 19:09:38 +00:00
|
|
|
for _, s := range splitSpace(members) {
|
2020-04-30 21:39:59 +00:00
|
|
|
memberships, nick := uc.parseMembershipPrefix(s)
|
2022-06-06 07:58:39 +00:00
|
|
|
ch.Members.Set(nick, &memberships)
|
2020-02-06 18:22:04 +00:00
|
|
|
}
|
|
|
|
case irc.RPL_ENDOFNAMES:
|
2020-02-07 11:36:02 +00:00
|
|
|
var name string
|
|
|
|
if err := parseMessageParams(msg, nil, &name); err != nil {
|
|
|
|
return err
|
2020-02-06 20:43:22 +00:00
|
|
|
}
|
2020-03-21 00:24:29 +00:00
|
|
|
|
2022-06-06 07:58:39 +00:00
|
|
|
ch := uc.channels.Get(name)
|
Implement casemapping
2021-03-16 09:00:34 +00:00
|
|
|
if ch == nil {
|
2020-03-21 00:24:29 +00:00
|
|
|
// NAMES on a channel we have not joined, forward to downstream
|
2020-03-26 01:39:04 +00:00
|
|
|
uc.forEachDownstreamByID(downstreamID, func(dc *downstreamConn) {
|
2020-04-16 15:19:00 +00:00
|
|
|
channel := dc.marshalEntity(uc.network, name)
|
2020-03-21 00:24:29 +00:00
|
|
|
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
|
|
|
Command: irc.RPL_ENDOFNAMES,
|
|
|
|
Params: []string{dc.nick, channel, "End of /NAMES list"},
|
|
|
|
})
|
|
|
|
})
|
|
|
|
return nil
|
2020-02-06 20:43:22 +00:00
|
|
|
}
|
|
|
|
|
2020-02-07 09:54:03 +00:00
|
|
|
if ch.complete {
|
|
|
|
return fmt.Errorf("received unexpected RPL_ENDOFNAMES")
|
|
|
|
}
|
2020-02-06 20:43:22 +00:00
|
|
|
ch.complete = true
|
2020-02-06 21:19:31 +00:00
|
|
|
|
2022-06-06 07:58:39 +00:00
|
|
|
c := uc.network.channels.Get(name)
|
Implement casemapping
2021-03-16 09:00:34 +00:00
|
|
|
if c == nil || !c.Detached {
|
2020-06-12 12:35:26 +00:00
|
|
|
uc.forEachDownstream(func(dc *downstreamConn) {
|
2021-01-29 15:57:38 +00:00
|
|
|
forwardChannel(ctx, dc, ch)
|
2020-06-12 12:35:26 +00:00
|
|
|
})
|
|
|
|
}
|
2020-03-19 23:23:19 +00:00
|
|
|
case irc.RPL_WHOREPLY:
|
2022-02-09 14:20:32 +00:00
|
|
|
var channel, username, host, server, nick, flags, trailing string
|
|
|
|
if err := parseMessageParams(msg, nil, &channel, &username, &host, &server, &nick, &flags, &trailing); err != nil {
|
2020-03-19 23:23:19 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2021-11-09 21:09:17 +00:00
|
|
|
dc, cmd := uc.currentPendingCommand("WHO")
|
|
|
|
if cmd == nil {
|
|
|
|
return fmt.Errorf("unexpected RPL_WHOREPLY: no matching pending WHO")
|
|
|
|
} else if dc == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
if channel != "*" {
|
|
|
|
channel = dc.marshalEntity(uc.network, channel)
|
|
|
|
}
|
|
|
|
nick = dc.marshalEntity(uc.network, nick)
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
|
|
|
Command: irc.RPL_WHOREPLY,
|
2022-02-09 14:20:32 +00:00
|
|
|
Params: []string{dc.nick, channel, username, host, server, nick, flags, trailing},
|
2020-03-19 23:23:19 +00:00
|
|
|
})
|
2022-05-09 15:18:51 +00:00
|
|
|
case xirc.RPL_WHOSPCRPL:
|
2021-11-09 21:09:17 +00:00
|
|
|
dc, cmd := uc.currentPendingCommand("WHO")
|
|
|
|
if cmd == nil {
|
|
|
|
return fmt.Errorf("unexpected RPL_WHOSPCRPL: no matching pending WHO")
|
|
|
|
} else if dc == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Only supported in single-upstream mode, so forward as-is
|
|
|
|
dc.SendMessage(msg)
|
2020-03-19 23:23:19 +00:00
|
|
|
case irc.RPL_ENDOFWHO:
|
|
|
|
var name string
|
|
|
|
if err := parseMessageParams(msg, nil, &name); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2021-11-09 21:09:17 +00:00
|
|
|
dc, cmd := uc.dequeueCommand("WHO")
|
|
|
|
if cmd == nil {
|
2022-07-11 09:25:19 +00:00
|
|
|
// Some servers send RPL_TRYAGAIN followed by RPL_ENDOFWHO
|
|
|
|
return nil
|
2021-11-09 21:09:17 +00:00
|
|
|
} else if dc == nil {
|
2022-07-11 09:25:19 +00:00
|
|
|
// Downstream connection is gone
|
2021-11-09 21:09:17 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
mask := "*"
|
|
|
|
if len(cmd.Params) > 0 {
|
|
|
|
mask = cmd.Params[0]
|
|
|
|
}
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
|
|
|
Command: irc.RPL_ENDOFWHO,
|
|
|
|
Params: []string{dc.nick, mask, "End of /WHO list"},
|
2020-03-20 01:15:23 +00:00
|
|
|
})
|
2022-05-09 15:18:51 +00:00
|
|
|
case xirc.RPL_WHOISCERTFP, xirc.RPL_WHOISREGNICK, irc.RPL_WHOISUSER, irc.RPL_WHOISSERVER, irc.RPL_WHOISOPERATOR, irc.RPL_WHOISIDLE, xirc.RPL_WHOISSPECIAL, xirc.RPL_WHOISACCOUNT, xirc.RPL_WHOISACTUALLY, xirc.RPL_WHOISHOST, xirc.RPL_WHOISMODES, xirc.RPL_WHOISSECURE:
|
2020-03-20 01:15:23 +00:00
|
|
|
var nick string
|
|
|
|
if err := parseMessageParams(msg, nil, &nick); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2022-04-04 07:57:08 +00:00
|
|
|
dc, cmd := uc.currentPendingCommand("WHOIS")
|
|
|
|
if cmd == nil {
|
|
|
|
return fmt.Errorf("unexpected WHOIS reply %q: no matching pending WHOIS", msg.Command)
|
|
|
|
} else if dc == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
msg := msg.Copy()
|
|
|
|
msg.Params[1] = dc.marshalEntity(uc.network, nick)
|
|
|
|
dc.SendMessage(msg)
|
2020-03-20 01:15:23 +00:00
|
|
|
case irc.RPL_WHOISCHANNELS:
|
|
|
|
var nick, channelList string
|
|
|
|
if err := parseMessageParams(msg, nil, &nick, &channelList); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2020-03-27 19:09:38 +00:00
|
|
|
channels := splitSpace(channelList)
|
2020-03-20 01:15:23 +00:00
|
|
|
|
2022-04-04 07:57:08 +00:00
|
|
|
dc, cmd := uc.currentPendingCommand("WHOIS")
|
|
|
|
if cmd == nil {
|
|
|
|
return fmt.Errorf("unexpected RPL_WHOISCHANNELS: no matching pending WHOIS")
|
|
|
|
} else if dc == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
nick = dc.marshalEntity(uc.network, nick)
|
|
|
|
l := make([]string, len(channels))
|
|
|
|
for i, channel := range channels {
|
|
|
|
prefix, channel := uc.parseMembershipPrefix(channel)
|
|
|
|
channel = dc.marshalEntity(uc.network, channel)
|
2022-05-30 07:12:28 +00:00
|
|
|
l[i] = formatMemberPrefix(prefix, dc) + channel
|
2022-04-04 07:57:08 +00:00
|
|
|
}
|
|
|
|
channelList = strings.Join(l, " ")
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
|
|
|
Command: irc.RPL_WHOISCHANNELS,
|
|
|
|
Params: []string{dc.nick, nick, channelList},
|
2020-03-20 01:15:23 +00:00
|
|
|
})
|
|
|
|
case irc.RPL_ENDOFWHOIS:
|
|
|
|
var nick string
|
|
|
|
if err := parseMessageParams(msg, nil, &nick); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2022-04-04 07:57:08 +00:00
|
|
|
dc, cmd := uc.dequeueCommand("WHOIS")
|
|
|
|
if cmd == nil {
|
|
|
|
return fmt.Errorf("unexpected RPL_ENDOFWHOIS: no matching pending WHOIS")
|
|
|
|
} else if dc == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
nick = dc.marshalEntity(uc.network, nick)
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
|
|
|
Command: irc.RPL_ENDOFWHOIS,
|
|
|
|
Params: []string{dc.nick, nick, "End of /WHOIS list"},
|
2020-03-19 23:23:19 +00:00
|
|
|
})
|
2020-03-18 02:11:38 +00:00
|
|
|
case "INVITE":
|
2020-04-29 13:00:17 +00:00
|
|
|
var nick, channel string
|
2020-03-18 02:11:38 +00:00
|
|
|
if err := parseMessageParams(msg, &nick, &channel); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
Implement casemapping
2021-03-16 09:00:34 +00:00
|
|
|
weAreInvited := uc.isOurNick(nick)
|
2021-01-21 08:22:15 +00:00
|
|
|
|
2020-03-18 02:11:38 +00:00
|
|
|
uc.forEachDownstream(func(dc *downstreamConn) {
|
2022-03-14 18:15:35 +00:00
|
|
|
if !weAreInvited && !dc.caps.IsEnabled("invite-notify") {
|
2021-01-21 08:22:15 +00:00
|
|
|
return
|
|
|
|
}
|
2020-03-18 02:11:38 +00:00
|
|
|
dc.SendMessage(&irc.Message{
|
2020-04-16 15:19:00 +00:00
|
|
|
Prefix: dc.marshalUserPrefix(uc.network, msg.Prefix),
|
2020-03-18 02:11:38 +00:00
|
|
|
Command: "INVITE",
|
2020-04-16 15:19:00 +00:00
|
|
|
Params: []string{dc.marshalEntity(uc.network, nick), dc.marshalEntity(uc.network, channel)},
|
2020-03-18 02:11:38 +00:00
|
|
|
})
|
|
|
|
})
|
2021-11-27 10:48:10 +00:00
|
|
|
|
|
|
|
if weAreInvited {
|
2022-08-17 14:09:12 +00:00
|
|
|
go uc.network.broadcastWebPush(msg)
|
2021-11-27 10:48:10 +00:00
|
|
|
}
|
2020-03-26 05:03:07 +00:00
|
|
|
case irc.RPL_INVITING:
|
2020-04-29 13:00:17 +00:00
|
|
|
var nick, channel string
|
2020-05-27 21:46:27 +00:00
|
|
|
if err := parseMessageParams(msg, nil, &nick, &channel); err != nil {
|
2020-03-26 05:03:07 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
uc.forEachDownstreamByID(downstreamID, func(dc *downstreamConn) {
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
|
|
|
Command: irc.RPL_INVITING,
|
2020-04-16 15:19:00 +00:00
|
|
|
Params: []string{dc.nick, dc.marshalEntity(uc.network, nick), dc.marshalEntity(uc.network, channel)},
|
2020-03-26 05:03:07 +00:00
|
|
|
})
|
|
|
|
})
|
2021-11-09 16:59:43 +00:00
|
|
|
case irc.RPL_MONONLINE, irc.RPL_MONOFFLINE:
|
|
|
|
var targetsStr string
|
|
|
|
if err := parseMessageParams(msg, nil, &targetsStr); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
targets := strings.Split(targetsStr, ",")
|
|
|
|
|
|
|
|
online := msg.Command == irc.RPL_MONONLINE
|
|
|
|
for _, target := range targets {
|
|
|
|
prefix := irc.ParsePrefix(target)
|
2022-06-06 07:58:39 +00:00
|
|
|
uc.monitored.Set(prefix.Name, online)
|
2021-11-09 16:59:43 +00:00
|
|
|
}
|
|
|
|
|
2021-12-04 19:07:23 +00:00
|
|
|
// Check if the nick we want is now free
|
2022-05-09 10:34:43 +00:00
|
|
|
wantNick := database.GetNick(&uc.user.User, &uc.network.Network)
|
2022-08-22 19:59:52 +00:00
|
|
|
if !online && !uc.isOurNick(wantNick) && !uc.hasDesiredNick {
|
2021-12-04 19:07:23 +00:00
|
|
|
found := false
|
|
|
|
for _, target := range targets {
|
|
|
|
prefix := irc.ParsePrefix(target)
|
2022-07-08 12:52:10 +00:00
|
|
|
if uc.network.equalCasemap(prefix.Name, wantNick) {
|
2021-12-04 19:07:23 +00:00
|
|
|
found = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if found {
|
|
|
|
uc.logger.Printf("desired nick %q is now available", wantNick)
|
2021-12-08 17:03:40 +00:00
|
|
|
uc.SendMessage(ctx, &irc.Message{
|
2021-12-04 19:07:23 +00:00
|
|
|
Command: "NICK",
|
|
|
|
Params: []string{wantNick},
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-11-09 16:59:43 +00:00
|
|
|
uc.forEachDownstream(func(dc *downstreamConn) {
|
|
|
|
for _, target := range targets {
|
|
|
|
prefix := irc.ParsePrefix(target)
|
|
|
|
if dc.monitored.Has(prefix.Name) {
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
|
|
|
Command: msg.Command,
|
|
|
|
Params: []string{dc.nick, target},
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
|
|
|
case irc.ERR_MONLISTFULL:
|
|
|
|
var limit, targetsStr string
|
|
|
|
if err := parseMessageParams(msg, nil, &limit, &targetsStr); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
targets := strings.Split(targetsStr, ",")
|
|
|
|
uc.forEachDownstream(func(dc *downstreamConn) {
|
|
|
|
for _, target := range targets {
|
|
|
|
if dc.monitored.Has(target) {
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
|
|
|
Command: msg.Command,
|
|
|
|
Params: []string{dc.nick, limit, target},
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
2020-04-29 12:53:48 +00:00
|
|
|
case irc.RPL_AWAY:
|
|
|
|
var nick, reason string
|
|
|
|
if err := parseMessageParams(msg, nil, &nick, &reason); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-04-29 13:32:22 +00:00
|
|
|
uc.forEachDownstream(func(dc *downstreamConn) {
|
2020-04-29 12:53:48 +00:00
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
|
|
|
Command: irc.RPL_AWAY,
|
|
|
|
Params: []string{dc.nick, dc.marshalEntity(uc.network, nick), reason},
|
|
|
|
})
|
|
|
|
})
|
2021-10-17 19:53:18 +00:00
|
|
|
case "AWAY", "ACCOUNT":
|
2021-10-17 19:49:37 +00:00
|
|
|
uc.forEachDownstream(func(dc *downstreamConn) {
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.marshalUserPrefix(uc.network, msg.Prefix),
|
|
|
|
Command: msg.Command,
|
|
|
|
Params: msg.Params,
|
|
|
|
})
|
|
|
|
})
|
2020-05-19 15:06:52 +00:00
|
|
|
case irc.RPL_BANLIST, irc.RPL_INVITELIST, irc.RPL_EXCEPTLIST:
|
|
|
|
var channel, mask string
|
|
|
|
if err := parseMessageParams(msg, nil, &channel, &mask); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
var addNick, addTime string
|
|
|
|
if len(msg.Params) >= 5 {
|
|
|
|
addNick = msg.Params[3]
|
|
|
|
addTime = msg.Params[4]
|
|
|
|
}
|
|
|
|
|
|
|
|
uc.forEachDownstreamByID(downstreamID, func(dc *downstreamConn) {
|
|
|
|
channel := dc.marshalEntity(uc.network, channel)
|
|
|
|
|
|
|
|
var params []string
|
|
|
|
if addNick != "" && addTime != "" {
|
|
|
|
addNick := dc.marshalEntity(uc.network, addNick)
|
|
|
|
params = []string{dc.nick, channel, mask, addNick, addTime}
|
|
|
|
} else {
|
|
|
|
params = []string{dc.nick, channel, mask}
|
|
|
|
}
|
|
|
|
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
|
|
|
Command: msg.Command,
|
|
|
|
Params: params,
|
|
|
|
})
|
|
|
|
})
|
|
|
|
case irc.RPL_ENDOFBANLIST, irc.RPL_ENDOFINVITELIST, irc.RPL_ENDOFEXCEPTLIST:
|
|
|
|
var channel, trailing string
|
|
|
|
if err := parseMessageParams(msg, nil, &channel, &trailing); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
uc.forEachDownstreamByID(downstreamID, func(dc *downstreamConn) {
|
|
|
|
upstreamChannel := dc.marshalEntity(uc.network, channel)
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: dc.srv.prefix(),
|
|
|
|
Command: msg.Command,
|
|
|
|
Params: []string{dc.nick, upstreamChannel, trailing},
|
|
|
|
})
|
|
|
|
})
|
2022-04-04 07:57:08 +00:00
|
|
|
case irc.ERR_NOSUCHNICK:
|
|
|
|
var nick, reason string
|
|
|
|
if err := parseMessageParams(msg, nil, &nick, &reason); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
cm := uc.network.casemap
|
|
|
|
dc, cmd := uc.currentPendingCommand("WHOIS")
|
|
|
|
if cmd != nil && cm(cmd.Params[len(cmd.Params)-1]) == cm(nick) {
|
|
|
|
uc.dequeueCommand("WHOIS")
|
|
|
|
if dc != nil {
|
|
|
|
nick = dc.marshalEntity(uc.network, nick)
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: uc.srv.prefix(),
|
|
|
|
Command: msg.Command,
|
|
|
|
Params: []string{dc.nick, nick, reason},
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
2022-07-04 15:13:31 +00:00
|
|
|
case xirc.ERR_UNKNOWNERROR, irc.ERR_UNKNOWNCOMMAND, irc.ERR_NEEDMOREPARAMS, irc.RPL_TRYAGAIN:
|
2020-05-19 15:33:44 +00:00
|
|
|
var command, reason string
|
|
|
|
if err := parseMessageParams(msg, nil, &command, &reason); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2021-11-30 10:54:11 +00:00
|
|
|
if dc, _ := uc.dequeueCommand(command); dc != nil && downstreamID == 0 {
|
|
|
|
downstreamID = dc.id
|
2020-05-19 15:33:44 +00:00
|
|
|
}
|
|
|
|
|
2022-07-04 15:10:40 +00:00
|
|
|
if command == "AUTHENTICATE" {
|
|
|
|
uc.saslClient = nil
|
|
|
|
uc.saslStarted = false
|
|
|
|
}
|
|
|
|
|
2020-07-08 16:21:52 +00:00
|
|
|
uc.forEachDownstreamByID(downstreamID, func(dc *downstreamConn) {
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: uc.srv.prefix(),
|
|
|
|
Command: msg.Command,
|
|
|
|
Params: []string{dc.nick, command, reason},
|
2020-05-19 15:33:44 +00:00
|
|
|
})
|
2020-07-08 16:21:52 +00:00
|
|
|
})
|
2021-11-30 10:54:11 +00:00
|
|
|
case "FAIL":
|
2021-12-02 16:58:56 +00:00
|
|
|
var command, code string
|
|
|
|
if err := parseMessageParams(msg, &command, &code); err != nil {
|
2021-11-30 10:54:11 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2021-12-02 16:58:56 +00:00
|
|
|
if !uc.registered && command == "*" && code == "ACCOUNT_REQUIRED" {
|
|
|
|
return registrationError{msg}
|
|
|
|
}
|
|
|
|
|
2021-11-30 10:54:11 +00:00
|
|
|
if dc, _ := uc.dequeueCommand(command); dc != nil && downstreamID == 0 {
|
|
|
|
downstreamID = dc.id
|
|
|
|
}
|
|
|
|
|
|
|
|
uc.forEachDownstreamByID(downstreamID, func(dc *downstreamConn) {
|
|
|
|
dc.SendMessage(msg)
|
|
|
|
})
|
2020-03-23 02:21:43 +00:00
|
|
|
case "ACK":
|
|
|
|
// Ignore
|
2020-04-01 10:16:32 +00:00
|
|
|
case irc.RPL_NOWAWAY, irc.RPL_UNAWAY:
|
|
|
|
// Ignore
|
2020-02-06 16:04:49 +00:00
|
|
|
case irc.RPL_YOURHOST, irc.RPL_CREATED:
|
2020-02-06 15:39:09 +00:00
|
|
|
// Ignore
|
|
|
|
case irc.RPL_LUSERCLIENT, irc.RPL_LUSEROP, irc.RPL_LUSERUNKNOWN, irc.RPL_LUSERCHANNELS, irc.RPL_LUSERME:
|
2021-06-23 09:45:14 +00:00
|
|
|
fallthrough
|
2022-05-09 15:18:51 +00:00
|
|
|
case irc.RPL_STATSVLINE, xirc.RPL_STATSPING, irc.RPL_STATSBLINE, irc.RPL_STATSDLINE:
|
2021-06-23 09:45:14 +00:00
|
|
|
fallthrough
|
2022-05-09 15:18:51 +00:00
|
|
|
case xirc.RPL_LOCALUSERS, xirc.RPL_GLOBALUSERS:
|
2021-06-23 09:45:14 +00:00
|
|
|
fallthrough
|
Implement casemapping
2021-03-16 09:00:34 +00:00
|
|
|
case irc.RPL_MOTDSTART, irc.RPL_MOTD:
|
2021-06-23 09:45:14 +00:00
|
|
|
// Ignore these messages if they're part of the initial registration
|
|
|
|
// message burst. Forward them if the user explicitly asked for them.
|
2021-06-09 19:25:15 +00:00
|
|
|
if !uc.gotMotd {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-06-09 19:58:27 +00:00
|
|
|
uc.forEachDownstreamByID(downstreamID, func(dc *downstreamConn) {
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: uc.srv.prefix(),
|
2021-06-09 19:25:15 +00:00
|
|
|
Command: msg.Command,
|
2021-06-09 19:58:27 +00:00
|
|
|
Params: msg.Params,
|
2021-06-09 19:25:15 +00:00
|
|
|
})
|
|
|
|
})
|
Add LIST support
2020-03-26 01:40:30 +00:00
|
|
|
case irc.RPL_LISTSTART:
|
|
|
|
// Ignore
|
2020-08-13 13:30:41 +00:00
|
|
|
case "ERROR":
|
|
|
|
var text string
|
|
|
|
if err := parseMessageParams(msg, &text); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return fmt.Errorf("fatal server error: %v", text)
|
2021-12-04 19:07:23 +00:00
|
|
|
case irc.ERR_NICKNAMEINUSE:
|
|
|
|
// At this point, we haven't received ISUPPORT so we don't know the
|
|
|
|
// maximum nickname length or whether the server supports MONITOR. Many
|
|
|
|
// servers have NICKLEN=30 so let's just use that.
|
|
|
|
if !uc.registered && len(uc.nick)+1 < 30 {
|
|
|
|
uc.nick = uc.nick + "_"
|
2022-08-22 19:59:52 +00:00
|
|
|
uc.hasDesiredNick = false
|
2021-12-04 19:07:23 +00:00
|
|
|
uc.logger.Printf("desired nick is not available, falling back to %q", uc.nick)
|
2021-12-08 17:03:40 +00:00
|
|
|
uc.SendMessage(ctx, &irc.Message{
|
2021-12-04 19:07:23 +00:00
|
|
|
Command: "NICK",
|
|
|
|
Params: []string{uc.nick},
|
|
|
|
})
|
|
|
|
return nil
|
|
|
|
}
|
2022-07-14 10:25:47 +00:00
|
|
|
|
|
|
|
var failedNick string
|
|
|
|
if err := parseMessageParams(msg, nil, &failedNick); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if uc.network.equalCasemap(uc.pendingRegainNick, failedNick) {
|
|
|
|
// This message comes from our own logic to try to regain our
|
|
|
|
// desired nick, don't relay to downstream connections
|
|
|
|
uc.pendingRegainNick = ""
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-12-04 19:07:23 +00:00
|
|
|
fallthrough
|
|
|
|
case irc.ERR_PASSWDMISMATCH, irc.ERR_ERRONEUSNICKNAME, irc.ERR_NICKCOLLISION, irc.ERR_UNAVAILRESOURCE, irc.ERR_NOPERMFORHOST, irc.ERR_YOUREBANNEDCREEP:
|
2020-06-29 15:52:49 +00:00
|
|
|
if !uc.registered {
|
2021-12-02 16:33:11 +00:00
|
|
|
return registrationError{msg}
|
2020-06-29 15:52:49 +00:00
|
|
|
}
|
|
|
|
fallthrough
|
2020-02-06 15:18:19 +00:00
|
|
|
default:
|
2020-03-13 14:12:44 +00:00
|
|
|
uc.logger.Printf("unhandled message: %v", msg)
|
2020-07-08 16:21:52 +00:00
|
|
|
|
|
|
|
uc.forEachDownstreamByID(downstreamID, func(dc *downstreamConn) {
|
|
|
|
// best effort marshaling for unknown messages, replies and errors:
|
|
|
|
// most numerics start with the user nick, marshal it if that's the case
|
|
|
|
// otherwise, conservatively keep the params without marshaling
|
|
|
|
params := msg.Params
|
|
|
|
if _, err := strconv.Atoi(msg.Command); err == nil { // numeric
|
|
|
|
if len(msg.Params) > 0 && isOurNick(uc.network, msg.Params[0]) {
|
|
|
|
params[0] = dc.nick
|
2020-05-19 15:33:44 +00:00
|
|
|
}
|
2020-07-08 16:21:52 +00:00
|
|
|
}
|
|
|
|
dc.SendMessage(&irc.Message{
|
|
|
|
Prefix: uc.srv.prefix(),
|
|
|
|
Command: msg.Command,
|
|
|
|
Params: params,
|
2020-05-19 15:33:44 +00:00
|
|
|
})
|
2020-07-08 16:21:52 +00:00
|
|
|
})
|
2020-02-06 15:18:19 +00:00
|
|
|
}
|
2020-02-06 15:39:09 +00:00
|
|
|
return nil
|
2020-02-06 15:18:19 +00:00
|
|
|
}
|
|
|
|
|
2022-05-09 10:34:43 +00:00
|
|
|
func (uc *upstreamConn) handleDetachedMessage(ctx context.Context, ch *database.Channel, msg *irc.Message) {
|
2021-04-13 17:11:05 +00:00
|
|
|
if uc.network.detachedMessageNeedsRelay(ch, msg) {
|
Add customizable auto-detaching, auto-reattaching, relaying.
This uses the fields added previously to the Channel struct to implement
the actual detaching/reattaching/relaying logic.
The `FilterDefault` values of the message filters are currently
hardcoded.
The values of the message filters are not currently user-settable.
This introduces a new user event, eventChannelDetach, which stores an
upstreamConn (which might become invalid at the time of processing) and
a channel name, used for auto-detaching. Every time the channel detach
timer is refreshed (by receiving a message, etc.), a new timer is
created on the upstreamChannel, which dispatches this event after the
configured duration (discarding the previous timer, if any).
2020-11-30 21:08:33 +00:00
|
|
|
uc.forEachDownstream(func(dc *downstreamConn) {
|
2021-04-13 17:11:05 +00:00
|
|
|
dc.relayDetachedMessage(uc.network, msg)
|
2020-11-30 21:08:33 +00:00
|
|
|
})
|
|
|
|
}
|
2022-05-09 10:34:43 +00:00
|
|
|
if ch.ReattachOn == database.FilterMessage || (ch.ReattachOn == database.FilterHighlight && uc.network.isHighlight(msg)) {
|
2021-01-29 15:57:38 +00:00
|
|
|
uc.network.attach(ctx, ch)
|
2021-12-02 22:27:12 +00:00
|
|
|
if err := uc.srv.db.StoreChannel(ctx, uc.network.ID, ch); err != nil {
|
2020-11-30 21:08:33 +00:00
|
|
|
uc.logger.Printf("failed to update channel %q: %v", ch.Name, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
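A minimal sketch of the timer-refresh pattern described in the auto-detach commit message above, assuming a detachTimer field on upstreamChannel and the eventChannelDetach field names implied by that message (details hypothetical):

	// updateAutoDetach re-arms the channel's auto-detach timer: it discards
	// any previous timer and schedules an eventChannelDetach dispatch after
	// the idle duration. A zero duration disables auto-detach.
	func (uch *upstreamChannel) updateAutoDetach(dur time.Duration) {
		if uch.detachTimer != nil {
			uch.detachTimer.Stop()
			uch.detachTimer = nil
		}
		if dur == 0 {
			return
		}
		uch.detachTimer = time.AfterFunc(dur, func() {
			// field names assumed from the commit message above
			uch.conn.network.user.events <- eventChannelDetach{uc: uch.conn, name: uch.Name}
		})
	}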
|
|
|
|
|
2021-03-15 21:53:46 +00:00
|
|
|
func (uc *upstreamConn) handleChanModes(s string) error {
|
|
|
|
parts := strings.SplitN(s, ",", 5)
|
|
|
|
if len(parts) < 4 {
|
|
|
|
return fmt.Errorf("malformed ISUPPORT CHANMODES value: %v", s)
|
|
|
|
}
|
|
|
|
modes := make(map[byte]channelModeType)
|
|
|
|
for i, mt := range []channelModeType{modeTypeA, modeTypeB, modeTypeC, modeTypeD} {
|
|
|
|
for j := 0; j < len(parts[i]); j++ {
|
|
|
|
mode := parts[i][j]
|
|
|
|
modes[mode] = mt
|
|
|
|
}
|
|
|
|
}
|
|
|
|
uc.availableChannelModes = modes
|
|
|
|
return nil
|
|
|
|
}
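For example, an ISUPPORT token of CHANMODES=beI,k,l,imnpst is parsed into:

	// 'b', 'e', 'I'                -> modeTypeA (list modes, e.g. ban lists)
	// 'k'                          -> modeTypeB (parameter always required)
	// 'l'                          -> modeTypeC (parameter only when setting)
	// 'i', 'm', 'n', 'p', 's', 't' -> modeTypeD (no parameter)

Any fifth comma-separated field is ignored by the SplitN above.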
|
|
|
|
|
|
|
|
func (uc *upstreamConn) handleMemberships(s string) error {
|
|
|
|
if s == "" {
|
|
|
|
uc.availableMemberships = nil
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
if s[0] != '(' {
|
|
|
|
return fmt.Errorf("malformed ISUPPORT PREFIX value: %v", s)
|
|
|
|
}
|
|
|
|
sep := strings.IndexByte(s, ')')
|
|
|
|
if sep < 0 || len(s) != sep*2 {
|
|
|
|
return fmt.Errorf("malformed ISUPPORT PREFIX value: %v", s)
|
|
|
|
}
|
2022-05-30 07:12:28 +00:00
|
|
|
memberships := make([]xirc.Membership, len(s)/2-1)
|
2021-03-15 21:53:46 +00:00
|
|
|
for i := range memberships {
|
2022-05-30 07:12:28 +00:00
|
|
|
memberships[i] = xirc.Membership{
|
2021-03-15 21:53:46 +00:00
|
|
|
Mode: s[i+1],
|
|
|
|
Prefix: s[sep+i+1],
|
|
|
|
}
|
|
|
|
}
|
|
|
|
uc.availableMemberships = memberships
|
|
|
|
return nil
|
|
|
|
}
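A worked example: PREFIX=(ov)@+ gives sep == 3 and len(s) == sep*2, so two memberships are built:

	// s = "(ov)@+"
	// memberships[0] = xirc.Membership{Mode: 'o', Prefix: '@'}
	// memberships[1] = xirc.Membership{Mode: 'v', Prefix: '+'}

An empty PREFIX value clears availableMemberships entirely.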
|
|
|
|
|
2020-04-30 13:27:41 +00:00
|
|
|
func (uc *upstreamConn) handleSupportedCaps(capsStr string) {
|
|
|
|
caps := strings.Fields(capsStr)
|
|
|
|
for _, s := range caps {
|
|
|
|
kv := strings.SplitN(s, "=", 2)
|
|
|
|
k := strings.ToLower(kv[0])
|
|
|
|
var v string
|
|
|
|
if len(kv) == 2 {
|
|
|
|
v = kv[1]
|
|
|
|
}
|
2022-03-14 18:24:39 +00:00
|
|
|
uc.caps.Available[k] = v
|
2020-04-30 13:27:41 +00:00
|
|
|
}
|
|
|
|
}
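For instance, a CAP LS 302 advertisement of "sasl=PLAIN,EXTERNAL server-time" ends up stored as:

	// uc.caps.Available["sasl"]        = "PLAIN,EXTERNAL"
	// uc.caps.Available["server-time"] = ""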
|
|
|
|
|
2022-04-10 16:05:12 +00:00
|
|
|
func (uc *upstreamConn) updateCaps(ctx context.Context) {
|
2020-04-30 13:27:41 +00:00
|
|
|
var requestCaps []string
|
2020-04-30 14:10:39 +00:00
|
|
|
for c := range permanentUpstreamCaps {
|
2022-03-14 18:24:39 +00:00
|
|
|
if uc.caps.IsAvailable(c) && !uc.caps.IsEnabled(c) {
|
2020-04-30 13:27:41 +00:00
|
|
|
requestCaps = append(requestCaps, c)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-04-10 16:05:12 +00:00
|
|
|
echoMessage := uc.caps.IsAvailable("labeled-response")
|
|
|
|
if !uc.caps.IsEnabled("echo-message") && echoMessage {
|
|
|
|
requestCaps = append(requestCaps, "echo-message")
|
|
|
|
} else if uc.caps.IsEnabled("echo-message") && !echoMessage {
|
|
|
|
requestCaps = append(requestCaps, "-echo-message")
|
|
|
|
}
|
|
|
|
|
2020-04-30 14:10:39 +00:00
|
|
|
if len(requestCaps) == 0 {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2022-04-04 11:43:00 +00:00
|
|
|
uc.SendMessage(ctx, &irc.Message{
|
2020-04-30 14:10:39 +00:00
|
|
|
Command: "CAP",
|
|
|
|
Params: []string{"REQ", strings.Join(requestCaps, " ")},
|
|
|
|
})
|
|
|
|
}
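The resulting request is a single CAP REQ listing every capability to toggle, for example (illustrative):

	// CAP REQ :away-notify batch echo-message

Note that echo-message is requested only when labeled-response is available (see the echoMessage logic above), and is explicitly disabled again if labeled-response goes away.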
|
|
|
|
|
2021-11-21 15:28:38 +00:00
|
|
|
func (uc *upstreamConn) supportsSASL(mech string) bool {
|
2022-03-14 18:24:39 +00:00
|
|
|
v, ok := uc.caps.Available["sasl"]
|
2020-04-30 14:10:39 +00:00
|
|
|
if !ok {
|
|
|
|
return false
|
|
|
|
}
|
2021-11-21 15:28:38 +00:00
|
|
|
|
|
|
|
if v == "" {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
mechanisms := strings.Split(v, ",")
|
|
|
|
for _, m := range mechanisms {
|
|
|
|
if strings.EqualFold(m, mech) {
|
|
|
|
return true
|
2020-04-30 14:10:39 +00:00
|
|
|
}
|
|
|
|
}
|
2021-11-21 15:28:38 +00:00
|
|
|
return false
|
|
|
|
}
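Example lookups, given caps.Available["sasl"] == "PLAIN,EXTERNAL":

	// uc.supportsSASL("plain")         -> true  (comparison is case-insensitive)
	// uc.supportsSASL("SCRAM-SHA-256") -> false
	// an empty advertised value means any mechanism is assumed supported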
|
2020-04-30 14:10:39 +00:00
|
|
|
|
2021-11-21 15:28:38 +00:00
|
|
|
func (uc *upstreamConn) requestSASL() bool {
|
|
|
|
if uc.network.SASL.Mechanism == "" {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
return uc.supportsSASL(uc.network.SASL.Mechanism)
|
2020-04-30 14:10:39 +00:00
|
|
|
}
|
|
|
|
|
2021-12-10 11:28:16 +00:00
|
|
|
func (uc *upstreamConn) handleCapAck(ctx context.Context, name string, ok bool) error {
|
2022-03-14 18:24:39 +00:00
|
|
|
uc.caps.SetEnabled(name, ok)
|
2020-04-30 14:10:39 +00:00
|
|
|
|
|
|
|
switch name {
|
|
|
|
case "sasl":
|
2021-11-21 15:10:54 +00:00
|
|
|
if !uc.requestSASL() {
|
|
|
|
return nil
|
|
|
|
}
|
2020-04-30 14:10:39 +00:00
|
|
|
if !ok {
|
|
|
|
uc.logger.Printf("server refused to acknowledge the SASL capability")
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
auth := &uc.network.SASL
|
|
|
|
switch auth.Mechanism {
|
|
|
|
case "PLAIN":
|
|
|
|
uc.logger.Printf("starting SASL PLAIN authentication with username %q", auth.Plain.Username)
|
|
|
|
uc.saslClient = sasl.NewPlainClient("", auth.Plain.Username, auth.Plain.Password)
|
2020-05-29 11:10:54 +00:00
|
|
|
case "EXTERNAL":
|
|
|
|
uc.logger.Printf("starting SASL EXTERNAL authentication")
|
|
|
|
uc.saslClient = sasl.NewExternalClient("")
|
2020-04-30 14:10:39 +00:00
|
|
|
default:
|
|
|
|
return fmt.Errorf("unsupported SASL mechanism %q", name)
|
|
|
|
}
|
|
|
|
|
2021-12-10 11:28:16 +00:00
|
|
|
uc.SendMessage(ctx, &irc.Message{
|
2020-04-30 14:10:39 +00:00
|
|
|
Command: "AUTHENTICATE",
|
|
|
|
Params: []string{auth.Mechanism},
|
2020-04-30 13:27:41 +00:00
|
|
|
})
|
2022-04-10 16:05:12 +00:00
|
|
|
case "echo-message":
|
2020-04-30 14:10:39 +00:00
|
|
|
default:
|
|
|
|
if permanentUpstreamCaps[name] {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
uc.logger.Printf("received CAP ACK/NAK for a cap we don't support: %v", name)
|
2020-04-30 13:27:41 +00:00
|
|
|
}
|
2020-04-30 14:10:39 +00:00
|
|
|
return nil
|
2020-04-30 13:27:41 +00:00
|
|
|
}
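After the AUTHENTICATE <mechanism> sent above, a typical SASL PLAIN exchange continues like this (illustrative wire trace):

	// C: AUTHENTICATE PLAIN
	// S: AUTHENTICATE +
	// C: AUTHENTICATE <base64("\x00username\x00password")>
	// S: 903 <nick> :SASL authentication successful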
|
|
|
|
|
2020-03-27 19:09:38 +00:00
|
|
|
func splitSpace(s string) []string {
|
|
|
|
return strings.FieldsFunc(s, func(r rune) bool {
|
|
|
|
return r == ' '
|
|
|
|
})
|
|
|
|
}
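Unlike strings.Split, FieldsFunc drops empty fields, so repeated or trailing spaces don't produce empty strings:

	// splitSpace("a  b ")         -> []string{"a", "b"}
	// strings.Split("a  b ", " ") -> []string{"a", "", "b", ""}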
|
|
|
|
|
2022-02-08 15:38:34 +00:00
|
|
|
func (uc *upstreamConn) register(ctx context.Context) {
|
2022-05-09 10:34:43 +00:00
|
|
|
uc.nick = database.GetNick(&uc.user.User, &uc.network.Network)
|
|
|
|
uc.username = database.GetUsername(&uc.user.User, &uc.network.Network)
|
|
|
|
uc.realname = database.GetRealname(&uc.user.User, &uc.network.Network)
|
2020-03-04 17:22:58 +00:00
|
|
|
|
2021-12-08 17:03:40 +00:00
|
|
|
uc.SendMessage(ctx, &irc.Message{
|
2020-03-13 10:26:43 +00:00
|
|
|
Command: "CAP",
|
|
|
|
Params: []string{"LS", "302"},
|
|
|
|
})
|
|
|
|
|
2020-03-13 11:06:02 +00:00
|
|
|
if uc.network.Pass != "" {
|
2021-12-08 17:03:40 +00:00
|
|
|
uc.SendMessage(ctx, &irc.Message{
|
2020-03-13 11:06:02 +00:00
|
|
|
Command: "PASS",
|
|
|
|
Params: []string{uc.network.Pass},
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2021-12-08 17:03:40 +00:00
|
|
|
uc.SendMessage(ctx, &irc.Message{
|
2020-02-06 15:18:19 +00:00
|
|
|
Command: "NICK",
|
2020-02-19 17:25:19 +00:00
|
|
|
Params: []string{uc.nick},
|
2020-02-17 15:17:31 +00:00
|
|
|
})
|
2021-12-08 17:03:40 +00:00
|
|
|
uc.SendMessage(ctx, &irc.Message{
|
2020-02-06 15:18:19 +00:00
|
|
|
Command: "USER",
|
2020-03-04 17:22:58 +00:00
|
|
|
Params: []string{uc.username, "0", "*", uc.realname},
|
2020-02-17 15:17:31 +00:00
|
|
|
})
|
2020-02-07 11:37:44 +00:00
|
|
|
}
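On the wire, the registration burst above looks like this (PASS only when the network has a password configured):

	// C: CAP LS 302
	// C: PASS <network password>
	// C: NICK <nick>
	// C: USER <username> 0 * :<realname>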
|
|
|
|
|
2021-11-15 20:11:23 +00:00
|
|
|
func (uc *upstreamConn) ReadMessage() (*irc.Message, error) {
|
|
|
|
msg, err := uc.conn.ReadMessage()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
uc.srv.metrics.upstreamInMessagesTotal.Inc()
|
|
|
|
return msg, nil
|
|
|
|
}
|
|
|
|
|
2022-02-08 15:37:31 +00:00
|
|
|
func (uc *upstreamConn) runUntilRegistered(ctx context.Context) error {
|
2020-04-01 10:14:36 +00:00
|
|
|
for !uc.registered {
|
2020-04-03 15:01:25 +00:00
|
|
|
msg, err := uc.ReadMessage()
|
2020-04-01 10:14:36 +00:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("failed to read message: %v", err)
|
|
|
|
}
|
|
|
|
|
2022-02-08 15:37:31 +00:00
|
|
|
if err := uc.handleMessage(ctx, msg); err != nil {
|
2020-08-19 21:35:12 +00:00
|
|
|
if _, ok := err.(registrationError); ok {
|
|
|
|
return err
|
|
|
|
} else {
|
|
|
|
msg.Tags = nil // prevent message tags from cluttering logs
|
|
|
|
return fmt.Errorf("failed to handle message %q: %v", msg, err)
|
|
|
|
}
|
2020-04-01 10:14:36 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-15 23:40:50 +00:00
|
|
|
for _, command := range uc.network.ConnectCommands {
|
|
|
|
m, err := irc.ParseMessage(command)
|
|
|
|
if err != nil {
|
|
|
|
uc.logger.Printf("failed to parse connect command %q: %v", command, err)
|
|
|
|
} else {
|
2022-02-08 15:37:31 +00:00
|
|
|
uc.SendMessage(ctx, m)
|
2020-04-15 23:40:50 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-01 10:14:36 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-03-27 15:33:19 +00:00
|
|
|
func (uc *upstreamConn) readMessages(ch chan<- event) error {
|
2020-02-06 15:18:19 +00:00
|
|
|
for {
|
2020-04-03 14:34:11 +00:00
|
|
|
msg, err := uc.ReadMessage()
|
2021-10-29 14:03:04 +00:00
|
|
|
if errors.Is(err, io.EOF) {
|
2020-02-06 15:18:19 +00:00
|
|
|
break
|
|
|
|
} else if err != nil {
|
|
|
|
return fmt.Errorf("failed to read IRC command: %v", err)
|
|
|
|
}
|
|
|
|
|
2020-03-27 15:33:19 +00:00
|
|
|
ch <- eventUpstreamMessage{msg, uc}
|
2020-02-06 15:18:19 +00:00
|
|
|
}
|
|
|
|
|
2020-02-07 11:42:24 +00:00
|
|
|
return nil
|
2020-02-06 15:18:19 +00:00
|
|
|
}
|
2020-02-17 15:17:31 +00:00
|
|
|
|
2021-12-08 17:03:40 +00:00
|
|
|
func (uc *upstreamConn) SendMessage(ctx context.Context, msg *irc.Message) {
|
2022-03-14 18:24:39 +00:00
|
|
|
if !uc.caps.IsEnabled("message-tags") {
|
2020-05-21 05:04:34 +00:00
|
|
|
msg = msg.Copy()
|
|
|
|
msg.Tags = nil
|
|
|
|
}
|
|
|
|
|
2021-11-15 20:11:23 +00:00
|
|
|
uc.srv.metrics.upstreamOutMessagesTotal.Inc()
|
2021-12-08 17:03:40 +00:00
|
|
|
uc.conn.SendMessage(ctx, msg)
|
2020-05-21 05:04:34 +00:00
|
|
|
}
|
|
|
|
|
2021-12-08 17:03:40 +00:00
|
|
|
func (uc *upstreamConn) SendMessageLabeled(ctx context.Context, downstreamID uint64, msg *irc.Message) {
|
2022-03-14 18:24:39 +00:00
|
|
|
if uc.caps.IsEnabled("labeled-response") {
|
2020-03-23 02:21:43 +00:00
|
|
|
if msg.Tags == nil {
|
|
|
|
msg.Tags = make(map[string]irc.TagValue)
|
|
|
|
}
|
2020-03-26 03:30:11 +00:00
|
|
|
msg.Tags["label"] = irc.TagValue(fmt.Sprintf("sd-%d-%d", downstreamID, uc.nextLabelID))
|
2020-03-26 01:39:04 +00:00
|
|
|
uc.nextLabelID++
|
2020-03-23 02:21:43 +00:00
|
|
|
}
|
2021-12-08 17:03:40 +00:00
|
|
|
uc.SendMessage(ctx, msg)
|
2020-03-23 02:21:43 +00:00
|
|
|
}
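Labels generated this way embed the downstream id and a per-connection counter, so replies can be routed back to the one downstream that issued the request:

	// downstreamID = 3, nextLabelID = 7  ->  @label=sd-3-7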
|
2020-03-25 22:51:28 +00:00
|
|
|
|
2020-08-28 15:21:08 +00:00
|
|
|
// appendLog appends a message to the log file.
|
|
|
|
//
|
|
|
|
// The internal message ID is returned. If the message isn't recorded in the
|
|
|
|
// log file, an empty string is returned.
|
|
|
|
func (uc *upstreamConn) appendLog(entity string, msg *irc.Message) (msgID string) {
|
2020-10-25 10:13:51 +00:00
|
|
|
if uc.user.msgStore == nil {
|
2020-08-28 15:21:08 +00:00
|
|
|
return ""
|
2020-03-25 22:51:28 +00:00
|
|
|
}
|
2021-03-30 10:28:45 +00:00
|
|
|
|
2021-06-23 17:21:18 +00:00
|
|
|
// Don't store messages with a server mask target
|
|
|
|
if strings.HasPrefix(entity, "$") {
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
|
2021-03-30 10:28:45 +00:00
|
|
|
entityCM := uc.network.casemap(entity)
|
|
|
|
if entityCM == "nickserv" {
|
2021-03-16 08:54:29 +00:00
|
|
|
// The messages sent/received from NickServ may contain
|
|
|
|
// security-related information (like passwords). Don't store these.
|
|
|
|
return ""
|
|
|
|
}
|
2020-03-25 22:51:28 +00:00
|
|
|
|
2021-03-29 15:49:50 +00:00
|
|
|
if !uc.network.delivered.HasTarget(entity) {
|
2021-03-29 14:55:57 +00:00
|
|
|
// This is the first message we receive from this target. Save the last
|
|
|
|
// message ID in delivery receipts, so that we can send the new message
|
|
|
|
// in the backlog if an offline client reconnects.
|
2021-11-03 15:37:01 +00:00
|
|
|
lastID, err := uc.user.msgStore.LastMsgID(&uc.network.Network, entityCM, time.Now())
|
2020-08-19 11:24:05 +00:00
|
|
|
if err != nil {
|
|
|
|
uc.logger.Printf("failed to log message: failed to get last message ID: %v", err)
|
2020-08-28 15:21:08 +00:00
|
|
|
return ""
|
2020-08-19 11:24:05 +00:00
|
|
|
}
|
|
|
|
|
2021-02-10 17:16:08 +00:00
|
|
|
uc.network.delivered.ForEachClient(func(clientName string) {
|
2021-03-29 15:49:50 +00:00
|
|
|
uc.network.delivered.StoreID(entity, clientName, lastID)
|
2021-02-10 17:16:08 +00:00
|
|
|
})
|
2020-04-10 17:22:47 +00:00
|
|
|
}
|
|
|
|
|
2021-11-03 15:37:01 +00:00
|
|
|
msgID, err := uc.user.msgStore.Append(&uc.network.Network, entityCM, msg)
|
2020-08-19 11:24:05 +00:00
|
|
|
if err != nil {
|
2021-12-06 17:46:10 +00:00
|
|
|
uc.logger.Printf("failed to append message to store: %v", err)
|
2020-08-28 15:21:08 +00:00
|
|
|
return ""
|
2020-08-19 11:24:05 +00:00
|
|
|
}
|
2020-08-11 15:12:06 +00:00
|
|
|
|
2020-08-28 15:21:08 +00:00
|
|
|
return msgID
|
2020-04-10 17:22:47 +00:00
|
|
|
}
|
|
|
|
|
2020-08-19 11:24:05 +00:00
|
|
|
// produce appends a message to the logs and forwards it to connected downstream
|
|
|
|
// connections.
|
2020-04-07 17:45:29 +00:00
|
|
|
//
|
2022-04-10 16:05:12 +00:00
|
|
|
// originID is the id of the downstream (origin) that sent the message. If it is not 0
|
|
|
|
// and origin doesn't support echo-message, the message is forwarded to all
|
|
|
|
// connections except origin.
|
|
|
|
func (uc *upstreamConn) produce(target string, msg *irc.Message, originID uint64) {
|
2020-08-28 15:21:08 +00:00
|
|
|
var msgID string
|
2020-04-06 19:42:55 +00:00
|
|
|
if target != "" {
|
2020-08-28 15:21:08 +00:00
|
|
|
msgID = uc.appendLog(target, msg)
|
2020-04-06 19:42:55 +00:00
|
|
|
}
|
|
|
|
|
2020-04-28 13:27:41 +00:00
|
|
|
// Don't forward messages for detached channels
|
2022-06-06 07:58:39 +00:00
|
|
|
ch := uc.network.channels.Get(target)
|
2021-04-13 17:11:05 +00:00
|
|
|
detached := ch != nil && ch.Detached
|
2020-04-28 13:27:41 +00:00
|
|
|
|
2020-04-06 15:51:42 +00:00
|
|
|
uc.forEachDownstream(func(dc *downstreamConn) {
|
2022-08-21 20:08:10 +00:00
|
|
|
echo := dc.id == originID && msg.Prefix != nil && uc.isOurNick(msg.Prefix.Name)
|
|
|
|
if !detached && (!echo || dc.caps.IsEnabled("echo-message")) {
|
2020-08-28 15:21:08 +00:00
|
|
|
dc.sendMessageWithID(dc.marshalMessage(msg, uc.network), msgID)
|
|
|
|
} else {
|
|
|
|
dc.advanceMessageWithID(msg, msgID)
|
2020-04-06 19:34:45 +00:00
|
|
|
}
|
2020-04-06 15:51:42 +00:00
|
|
|
})
|
2020-04-06 15:03:07 +00:00
|
|
|
}
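Summarizing the forwarding decision above (a sketch):

	// detached channel                    -> advanceMessageWithID only, nothing forwarded
	// origin downstream, echo-message on  -> message echoed back to it
	// origin downstream, echo-message off -> advanceMessageWithID only
	// every other downstream              -> message forwarded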
|
|
|
|
|
2020-04-01 10:16:32 +00:00
|
|
|
func (uc *upstreamConn) updateAway() {
|
2021-12-08 17:03:40 +00:00
|
|
|
ctx := context.TODO()
|
|
|
|
|
2020-04-01 10:16:32 +00:00
|
|
|
away := true
|
2022-07-11 17:36:12 +00:00
|
|
|
uc.forEachDownstream(func(dc *downstreamConn) {
|
|
|
|
if dc.away == nil {
|
|
|
|
away = false
|
|
|
|
}
|
2020-04-01 10:16:32 +00:00
|
|
|
})
|
|
|
|
if away == uc.away {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if away {
|
2021-12-08 17:03:40 +00:00
|
|
|
uc.SendMessage(ctx, &irc.Message{
|
2020-04-01 10:16:32 +00:00
|
|
|
Command: "AWAY",
|
|
|
|
Params: []string{"Auto away"},
|
|
|
|
})
|
|
|
|
} else {
|
2021-12-08 17:03:40 +00:00
|
|
|
uc.SendMessage(ctx, &irc.Message{
|
2020-04-01 10:16:32 +00:00
|
|
|
Command: "AWAY",
|
|
|
|
})
|
|
|
|
}
|
|
|
|
uc.away = away
|
|
|
|
}
|
2020-11-30 21:08:33 +00:00
|
|
|
|
|
|
|
func (uc *upstreamConn) updateChannelAutoDetach(name string) {
|
2022-06-06 07:58:39 +00:00
|
|
|
uch := uc.channels.Get(name)
|
2021-03-16 09:00:34 +00:00
|
|
|
if uch == nil {
|
|
|
|
return
|
|
|
|
}
|
2022-06-06 07:58:39 +00:00
|
|
|
ch := uc.network.channels.Get(name)
|
2021-03-16 09:00:34 +00:00
|
|
|
if ch == nil || ch.Detached {
|
|
|
|
return
|
2020-11-30 21:08:33 +00:00
|
|
|
}
|
2021-03-16 09:00:34 +00:00
|
|
|
uch.updateAutoDetach(ch.DetachAfter)
|
2020-11-30 21:08:33 +00:00
|
|
|
}
|
2021-11-09 16:59:43 +00:00
|
|
|
|
|
|
|
func (uc *upstreamConn) updateMonitor() {
|
2021-12-04 18:29:39 +00:00
|
|
|
if _, ok := uc.isupport["MONITOR"]; !ok {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2021-12-08 17:03:40 +00:00
|
|
|
ctx := context.TODO()
|
|
|
|
|
2021-11-09 16:59:43 +00:00
|
|
|
add := make(map[string]struct{})
|
|
|
|
var addList []string
|
|
|
|
seen := make(map[string]struct{})
|
|
|
|
uc.forEachDownstream(func(dc *downstreamConn) {
|
2022-06-06 07:58:39 +00:00
|
|
|
for _, entry := range dc.monitored.m {
|
2022-03-08 20:27:43 +00:00
|
|
|
targetCM := uc.network.casemap(entry.originalKey)
|
2022-03-08 20:29:04 +00:00
|
|
|
if targetCM == serviceNickCM {
|
|
|
|
continue
|
|
|
|
}
|
2021-11-09 16:59:43 +00:00
|
|
|
if !uc.monitored.Has(targetCM) {
|
|
|
|
if _, ok := add[targetCM]; !ok {
|
|
|
|
addList = append(addList, targetCM)
|
2021-12-04 19:07:23 +00:00
|
|
|
add[targetCM] = struct{}{}
|
2021-11-09 16:59:43 +00:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
seen[targetCM] = struct{}{}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
2022-05-09 10:34:43 +00:00
|
|
|
wantNick := database.GetNick(&uc.user.User, &uc.network.Network)
|
2021-12-04 19:07:23 +00:00
|
|
|
wantNickCM := uc.network.casemap(wantNick)
|
2022-08-23 07:53:11 +00:00
|
|
|
if _, ok := add[wantNickCM]; !ok && !uc.monitored.Has(wantNick) && !uc.isOurNick(wantNick) && !uc.hasDesiredNick {
|
2021-12-04 19:07:23 +00:00
|
|
|
addList = append(addList, wantNickCM)
|
|
|
|
add[wantNickCM] = struct{}{}
|
|
|
|
}
|
|
|
|
|
2021-11-09 16:59:43 +00:00
|
|
|
removeAll := true
|
|
|
|
var removeList []string
|
2022-06-06 07:58:39 +00:00
|
|
|
uc.monitored.ForEach(func(nick string, online bool) {
|
|
|
|
if _, ok := seen[uc.network.casemap(nick)]; ok {
|
2021-11-09 16:59:43 +00:00
|
|
|
removeAll = false
|
|
|
|
} else {
|
2022-06-06 07:58:39 +00:00
|
|
|
removeList = append(removeList, nick)
|
2021-11-09 16:59:43 +00:00
|
|
|
}
|
2022-06-06 07:58:39 +00:00
|
|
|
})
|
2021-11-09 16:59:43 +00:00
|
|
|
|
|
|
|
// TODO: better handle the case where len(uc.monitored) + len(addList)
|
|
|
|
// exceeds the limit, probably by immediately sending ERR_MONLISTFULL?
|
|
|
|
|
|
|
|
if removeAll && len(addList) == 0 && len(removeList) > 0 {
|
|
|
|
// Optimization when the last MONITOR-aware downstream disconnects
|
2021-12-08 17:03:40 +00:00
|
|
|
uc.SendMessage(ctx, &irc.Message{
|
2021-11-09 16:59:43 +00:00
|
|
|
Command: "MONITOR",
|
|
|
|
Params: []string{"C"},
|
|
|
|
})
|
|
|
|
} else {
|
2022-05-29 15:57:21 +00:00
|
|
|
msgs := xirc.GenerateMonitor("-", removeList)
|
|
|
|
msgs = append(msgs, xirc.GenerateMonitor("+", addList)...)
|
2021-11-09 16:59:43 +00:00
|
|
|
for _, msg := range msgs {
|
2021-12-08 17:03:40 +00:00
|
|
|
uc.SendMessage(ctx, msg)
|
2021-11-09 16:59:43 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, target := range removeList {
|
2022-06-06 07:58:39 +00:00
|
|
|
uc.monitored.Del(target)
|
2021-11-09 16:59:43 +00:00
|
|
|
}
|
|
|
|
}
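Depending on the computed diff, this results in wire traffic like (illustrative):

	// MONITOR - oldnick1,oldnick2
	// MONITOR + newnick1,newnick2
	// MONITOR C    (clear everything, when the last MONITOR-aware downstream is gone)

xirc.GenerateMonitor splits long target lists across multiple MONITOR messages as needed.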
|
2022-07-14 10:25:47 +00:00
|
|
|
|
|
|
|
func (uc *upstreamConn) stopRegainNickTimer() {
|
|
|
|
if uc.regainNickTimer != nil {
|
|
|
|
uc.regainNickTimer.Stop()
|
|
|
|
// Maybe we're racing with the timer goroutine, so maybe we'll receive
|
|
|
|
// an eventTryRegainNick later on, but tryRegainNick handles that case
|
|
|
|
}
|
|
|
|
uc.regainNickTimer = nil
|
|
|
|
uc.regainNickBackoff = nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (uc *upstreamConn) startRegainNickTimer() {
|
|
|
|
if uc.regainNickBackoff != nil || uc.regainNickTimer != nil {
|
|
|
|
panic("startRegainNickTimer called twice")
|
|
|
|
}
|
|
|
|
|
|
|
|
wantNick := database.GetNick(&uc.user.User, &uc.network.Network)
|
|
|
|
if uc.isOurNick(wantNick) {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
const (
|
|
|
|
min = 15 * time.Second
|
|
|
|
max = 10 * time.Minute
|
|
|
|
jitter = 10 * time.Second
|
|
|
|
)
|
|
|
|
uc.regainNickBackoff = newBackoffer(min, max, jitter)
|
|
|
|
uc.regainNickTimer = time.AfterFunc(uc.regainNickBackoff.Next(), func() {
|
|
|
|
e := eventTryRegainNick{uc: uc, nick: wantNick}
|
|
|
|
select {
|
|
|
|
case uc.network.user.events <- e:
|
|
|
|
// ok
|
|
|
|
default:
|
|
|
|
uc.logger.Printf("skipping nick regain attempt: event queue is full")
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
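With min = 15s, max = 10min and 10s of jitter, regain attempts are retried with growing delays, assuming the backoffer roughly doubles the delay each time, caps it at max, and adds random jitter (a sketch of the schedule, not the exact implementation):

	// delays ≈ 15s, 30s, 60s, ... capped at 10min, each plus up to 10s of jitter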
|
|
|
|
|
|
|
|
func (uc *upstreamConn) tryRegainNick(nick string) {
|
|
|
|
ctx := context.TODO()
|
|
|
|
|
|
|
|
if uc.regainNickTimer == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Maybe the user has updated their desired nick
|
|
|
|
wantNick := database.GetNick(&uc.user.User, &uc.network.Network)
|
|
|
|
if wantNick != nick || uc.isOurNick(wantNick) {
|
|
|
|
uc.stopRegainNickTimer()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
uc.regainNickTimer.Reset(uc.regainNickBackoff.Next())
|
|
|
|
|
|
|
|
if uc.pendingRegainNick != "" {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
uc.SendMessage(ctx, &irc.Message{
|
|
|
|
Command: "NICK",
|
|
|
|
Params: []string{wantNick},
|
|
|
|
})
|
|
|
|
uc.pendingRegainNick = wantNick
|
|
|
|
}
|