package soju

import (
	"context"
	"crypto/sha256"
	"encoding/binary"
	"encoding/hex"
	"errors"
	"fmt"
	"math/big"
	"net"
	"sort"
	"strings"
	"sync/atomic"
	"time"

	"git.sr.ht/~emersion/soju/xirc"

	"github.com/SherClockHolmes/webpush-go"
	"gopkg.in/irc.v4"

	"git.sr.ht/~emersion/soju/database"
	"git.sr.ht/~emersion/soju/msgstore"
)
type event interface{}

type eventUpstreamMessage struct {
	msg *irc.Message
	uc  *upstreamConn
}

type eventUpstreamConnectionError struct {
	net *network
	err error
}

type eventUpstreamConnected struct {
	uc *upstreamConn
}

type eventUpstreamDisconnected struct {
	uc *upstreamConn
}

type eventUpstreamError struct {
	uc  *upstreamConn
	err error
}

type eventDownstreamMessage struct {
	msg *irc.Message
	dc  *downstreamConn
}

type eventDownstreamConnected struct {
	dc *downstreamConn
}

type eventDownstreamDisconnected struct {
	dc *downstreamConn
}

type eventChannelDetach struct {
	uc   *upstreamConn
	name string
}

type eventBroadcast struct {
	msg *irc.Message
}

type eventStop struct{}

type eventUserUpdate struct {
	password *string
	admin    *bool
	enabled  *bool
	done     chan error
}

type eventTryRegainNick struct {
	uc   *upstreamConn
	nick string
}

type eventUserRun struct {
	params []string
	print  chan string
	ret    chan error
}
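
// All of the event types above are funneled into a single per-user goroutine
// through the user.events channel and handled in (*user).run further down in
// this file. As an illustrative sketch (hypothetical call sites, not code
// taken from soju itself):
//
//	u.events <- eventStop{}
//	u.events <- eventBroadcast{msg: &irc.Message{
//		Command: "NOTICE",
//		Params:  []string{"*", "shutting down"},
//	}}
//
// The channel is buffered (see newUser below), so producers normally don't
// block unless the user goroutine falls far behind.
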
type deliveredClientMap map[string]string // client name -> msg ID

type deliveredStore struct {
	m deliveredCasemapMap
}

func newDeliveredStore() deliveredStore {
	return deliveredStore{deliveredCasemapMap{newCasemapMap()}}
}

func (ds deliveredStore) HasTarget(target string) bool {
	return ds.m.Get(target) != nil
}

func (ds deliveredStore) LoadID(target, clientName string) string {
	clients := ds.m.Get(target)
	if clients == nil {
		return ""
	}
	return clients[clientName]
}

func (ds deliveredStore) StoreID(target, clientName, msgID string) {
	clients := ds.m.Get(target)
	if clients == nil {
		clients = make(deliveredClientMap)
		ds.m.Set(target, clients)
	}
	clients[clientName] = msgID
}

func (ds deliveredStore) ForEachTarget(f func(target string)) {
	ds.m.ForEach(func(name string, _ deliveredClientMap) {
		f(name)
	})
}

func (ds deliveredStore) ForEachClient(f func(clientName string)) {
	clients := make(map[string]struct{})
	ds.m.ForEach(func(name string, delivered deliveredClientMap) {
		for clientName := range delivered {
			clients[clientName] = struct{}{}
		}
	})

	for clientName := range clients {
		f(clientName)
	}
}
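
// A minimal sketch of how deliveredStore is used, with hypothetical target,
// client and message ID values:
//
//	ds := newDeliveredStore()
//	ds.StoreID("#soju", "laptop", "12345")
//	ds.HasTarget("#soju")        // true
//	ds.LoadID("#soju", "laptop") // "12345"
//	ds.LoadID("#soju", "phone")  // "" (no receipt recorded for this client)
//
// The underlying map is casemap-aware (see updateCasemapping below), so
// lookups are expected to fold case according to the network's casemapping.
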
type network struct {
	database.Network
	user    *user
	logger  Logger
	stopped chan struct{}

	conn        *upstreamConn
	channels    channelCasemapMap
	delivered   deliveredStore
	pushTargets pushTargetCasemapMap
	lastError   error
	casemap     casemapping
}

func newNetwork(user *user, record *database.Network, channels []database.Channel) *network {
	logger := &prefixLogger{user.logger, fmt.Sprintf("network %q: ", record.GetName())}

	m := channelCasemapMap{newCasemapMap()}
	for _, ch := range channels {
		ch := ch
		m.Set(&ch)
	}

	return &network{
		Network:     *record,
		user:        user,
		logger:      logger,
		stopped:     make(chan struct{}),
		channels:    m,
		delivered:   newDeliveredStore(),
		pushTargets: pushTargetCasemapMap{newCasemapMap()},
		casemap:     casemapRFC1459,
	}
}

func (net *network) forEachDownstream(f func(*downstreamConn)) {
	for _, dc := range net.user.downstreamConns {
		if dc.network != net {
			continue
		}
		f(dc)
	}
}

func (net *network) isStopped() bool {
	select {
	case <-net.stopped:
		return true
	default:
		return false
	}
}

func (net *network) equalCasemap(a, b string) bool {
	return net.casemap(a) == net.casemap(b)
}
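
// A note on casemapping: with the default rfc1459 mapping set in newNetwork
// above, names that differ only by letter case or by the []\~ vs {}|^
// character pairs compare equal, e.g. equalCasemap("Nick[away]", "nick{away}")
// is true, while an ascii mapping folds letter case only. The mapping in use
// can change at runtime via updateCasemapping below, typically once the
// upstream advertises CASEMAPPING in ISUPPORT.
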
func userIdent(u *database.User) string {
	// The ident is a string we will send to upstream servers in clear-text.
	// For privacy reasons, make sure it doesn't expose any meaningful user
	// metadata. We just use the hex-encoded hashed ID, so that people don't
	// start relying on the string being an integer or following a pattern.
	var b [64]byte
	binary.LittleEndian.PutUint64(b[:], uint64(u.ID))
	h := sha256.Sum256(b[:])
	return hex.EncodeToString(h[:16])
}
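
// For illustration: the ident above is deterministic, so a given user ID
// always maps to the same 32-character hex string (the first 16 bytes of the
// SHA-256 digest), and nothing from the username or any other profile field
// leaks into it. A hypothetical user ID of 42 yields the same opaque token on
// every connection.
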
func (net *network) runConn(ctx context.Context) error {
	net.user.srv.metrics.upstreams.Add(1)
	defer net.user.srv.metrics.upstreams.Add(-1)

	ctx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()

	uc, err := connectToUpstream(ctx, net)
	if err != nil {
		return fmt.Errorf("failed to connect: %w", err)
	}
	defer uc.Close()

	if net.user.srv.Identd != nil {
		net.user.srv.Identd.Store(uc.RemoteAddr().String(), uc.LocalAddr().String(), userIdent(&net.user.User))
		defer net.user.srv.Identd.Delete(uc.RemoteAddr().String(), uc.LocalAddr().String())
	}

	// TODO: this is racy, we're not running in the user goroutine yet
	// uc.register accesses user/network DB records
	uc.register(ctx)
	if err := uc.runUntilRegistered(ctx); err != nil {
		return fmt.Errorf("failed to register: %w", err)
	}

	// TODO: this is racy with net.stopped. If the network is stopped
	// before the user goroutine receives eventUpstreamConnected, the
	// connection won't be closed.
	net.user.events <- eventUpstreamConnected{uc}
	defer func() {
		net.user.events <- eventUpstreamDisconnected{uc}
	}()

	if err := uc.readMessages(net.user.events); err != nil {
		return fmt.Errorf("failed to handle messages: %w", err)
	}

	return nil
}
func (net *network) run() {
	if !net.user.Enabled || !net.Enabled {
		return
	}

	var lastTry time.Time
	backoff := newBackoffer(retryConnectMinDelay, retryConnectMaxDelay, retryConnectJitter)
	for {
		if net.isStopped() {
			return
		}

		delay := backoff.Next() - time.Now().Sub(lastTry)
		if delay > 0 {
			net.logger.Printf("waiting %v before trying to reconnect to %q", delay.Truncate(time.Second), net.Addr)
			time.Sleep(delay)
		}
		lastTry = time.Now()

		if err := net.runConn(context.TODO()); err != nil {
			text := err.Error()
			temp := true
			var regErr registrationError
			if errors.As(err, &regErr) {
				text = "failed to register: " + regErr.Reason()
				temp = regErr.Temporary()
			}

			net.logger.Printf("connection error to %q: %v", net.Addr, text)
			net.user.events <- eventUpstreamConnectionError{net, fmt.Errorf("connection error: %v", err)}
			net.user.srv.metrics.upstreamConnectErrorsTotal.Inc()

			if !temp {
				return
			}
		} else {
			backoff.Reset()
		}
	}
}
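
// The reconnect loop above sleeps for backoff.Next() minus the time already
// spent on the previous attempt. As a rough illustration (the retryConnect*
// constants are defined elsewhere in the codebase): with a 1-minute minimum
// delay, an attempt that itself lasted 40 seconds is followed by only about
// 20 more seconds of sleep, and the delay grows towards the configured
// maximum until a connection succeeds and backoff.Reset() is called.
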
func (net *network) stop() {
	if !net.isStopped() {
		close(net.stopped)
	}

	if net.conn != nil {
		net.conn.Close()
	}
}

func (net *network) detach(ch *database.Channel) {
	if ch.Detached {
		return
	}

	net.logger.Printf("detaching channel %q", ch.Name)

	ch.Detached = true

	if net.user.msgStore != nil {
		nameCM := net.casemap(ch.Name)
		lastID, err := net.user.msgStore.LastMsgID(&net.Network, nameCM, time.Now())
		if err != nil {
			net.logger.Printf("failed to get last message ID for channel %q: %v", ch.Name, err)
		}
		ch.DetachedInternalMsgID = lastID
	}

	if net.conn != nil {
		uch := net.conn.channels.Get(ch.Name)
		if uch != nil {
			uch.updateAutoDetach(0)
		}
	}

	net.forEachDownstream(func(dc *downstreamConn) {
		dc.SendMessage(&irc.Message{
			Prefix:  dc.prefix(),
			Command: "PART",
			Params:  []string{ch.Name, "Detach"},
		})
	})
}

func (net *network) attach(ctx context.Context, ch *database.Channel) {
	if !ch.Detached {
		return
	}

	net.logger.Printf("attaching channel %q", ch.Name)

	detachedMsgID := ch.DetachedInternalMsgID
	ch.Detached = false
	ch.DetachedInternalMsgID = ""

	var uch *upstreamChannel
	if net.conn != nil {
		uch = net.conn.channels.Get(ch.Name)

		net.conn.updateChannelAutoDetach(ch.Name)
	}

	net.forEachDownstream(func(dc *downstreamConn) {
		dc.SendMessage(&irc.Message{
			Prefix:  dc.prefix(),
			Command: "JOIN",
			Params:  []string{ch.Name},
		})

		if uch != nil {
			forwardChannel(ctx, dc, uch)
		}

		if detachedMsgID != "" {
			dc.sendTargetBacklog(ctx, net, ch.Name, detachedMsgID)
		}
	})
}
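
// From an attached client's point of view, detach and attach look like plain
// channel traffic. An illustrative exchange (nick and channel are
// hypothetical, not taken from a real session):
//
//	:alice!alice@example.org PART #soju :Detach
//	... later ...
//	:alice!alice@example.org JOIN #soju
//
// followed, on attach, by the usual channel state via forwardChannel and any
// backlog recorded since the channel was detached via sendTargetBacklog.
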
func (net *network) deleteChannel(ctx context.Context, name string) error {
	ch := net.channels.Get(name)
	if ch == nil {
		return fmt.Errorf("unknown channel %q", name)
	}
	if net.conn != nil {
		uch := net.conn.channels.Get(ch.Name)
		if uch != nil {
			uch.updateAutoDetach(0)
		}
	}

	if err := net.user.srv.db.DeleteChannel(ctx, ch.ID); err != nil {
		return err
	}
	net.channels.Del(name)
	return nil
}
func (net *network) updateCasemapping(newCasemap casemapping) {
	net.casemap = newCasemap
	net.channels.SetCasemapping(newCasemap)
	net.delivered.m.SetCasemapping(newCasemap)
	net.pushTargets.SetCasemapping(newCasemap)
	if uc := net.conn; uc != nil {
		uc.channels.SetCasemapping(newCasemap)
		uc.channels.ForEach(func(uch *upstreamChannel) {
			uch.Members.SetCasemapping(newCasemap)
		})
		uc.users.SetCasemapping(newCasemap)
		uc.monitored.SetCasemapping(newCasemap)
	}
	net.forEachDownstream(func(dc *downstreamConn) {
		dc.monitored.SetCasemapping(newCasemap)
	})
}
func (net *network) storeClientDeliveryReceipts(ctx context.Context, clientName string) {
	if !net.user.hasPersistentMsgStore() {
		return
	}

	var receipts []database.DeliveryReceipt
	net.delivered.ForEachTarget(func(target string) {
		msgID := net.delivered.LoadID(target, clientName)
		if msgID == "" {
			return
		}
		receipts = append(receipts, database.DeliveryReceipt{
			Target:        target,
			InternalMsgID: msgID,
		})
	})

	if err := net.user.srv.db.StoreClientDeliveryReceipts(ctx, net.ID, clientName, receipts); err != nil {
		net.logger.Printf("failed to store delivery receipts for client %q: %v", clientName, err)
	}
}
func (net *network) isHighlight(msg *irc.Message) bool {
	if msg.Command != "PRIVMSG" && msg.Command != "NOTICE" {
		return false
	}

	text := msg.Params[1]

	nick := net.Nick
	if net.conn != nil {
		nick = net.conn.nick
	}

	// TODO: use case-mapping aware comparison here
	return msg.Prefix.Name != nick && isHighlight(text, nick)
}
func (net *network) detachedMessageNeedsRelay(ch *database.Channel, msg *irc.Message) bool {
	highlight := net.isHighlight(msg)
	return ch.RelayDetached == database.FilterMessage || ((ch.RelayDetached == database.FilterHighlight || ch.RelayDetached == database.FilterDefault) && highlight)
}
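
// Spelled out, the relay rule above for messages arriving on a detached
// channel is:
//
//	RelayDetached == FilterMessage              -> always relay
//	RelayDetached == FilterHighlight or Default -> relay only highlights
//	any other value                             -> never relay
//
// i.e. FilterDefault currently behaves like FilterHighlight here.
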
func (net *network) autoSaveSASLPlain(ctx context.Context, username, password string) {
	// User may have e.g. EXTERNAL mechanism configured. We do not want to
	// automatically erase the key pair or any other credentials.
	if net.SASL.Mechanism != "" && net.SASL.Mechanism != "PLAIN" {
		return
	}

	net.logger.Printf("auto-saving SASL PLAIN credentials with username %q", username)
	net.SASL.Mechanism = "PLAIN"
	net.SASL.Plain.Username = username
	net.SASL.Plain.Password = password
	if err := net.user.srv.db.StoreNetwork(ctx, net.user.ID, &net.Network); err != nil {
		net.logger.Printf("failed to save SASL PLAIN credentials: %v", err)
	}
}
// broadcastWebPush broadcasts a Web Push message for the given IRC message.
//
// Broadcasting the message to all Web Push endpoints might take a while, so
// callers should call this function in a new goroutine.
func (net *network) broadcastWebPush(msg *irc.Message) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	subs, err := net.user.srv.db.ListWebPushSubscriptions(ctx, net.user.ID, net.ID)
	if err != nil {
		net.logger.Printf("failed to list Web Push subscriptions: %v", err)
		return
	}

	for _, sub := range subs {
		err := net.user.srv.sendWebPush(ctx, &webpush.Subscription{
			Endpoint: sub.Endpoint,
			Keys: webpush.Keys{
				Auth:   sub.Keys.Auth,
				P256dh: sub.Keys.P256DH,
			},
		}, sub.Keys.VAPID, msg)
		if err == errWebPushSubscriptionExpired {
			if err := net.user.srv.db.DeleteWebPushSubscription(ctx, sub.ID); err != nil {
				net.logger.Printf("failed to delete expired Web Push subscription: %v", err)
			}
			net.logger.Debugf("deleted expired Web Push subscription %q", sub.Endpoint)
		} else if err != nil {
			net.logger.Printf("failed to send Web Push notification to endpoint %q: %v", sub.Endpoint, err)
		}
	}
}
type user struct {
	database.User
	srv    *Server
	logger Logger

	events chan event
	done   chan struct{}

	numDownstreamConns atomic.Int64

	networks        []*network
	downstreamConns []*downstreamConn
	msgStore        msgstore.Store
}
func newUser(srv *Server, record *database.User) *user {
	logger := &prefixLogger{srv.Logger, fmt.Sprintf("user %q: ", record.Username)}

	var msgStore msgstore.Store
	switch srv.Config().LogDriver {
	case "fs":
		msgStore = msgstore.NewFSStore(srv.Config().LogPath, record)
	case "db":
		msgStore = msgstore.NewDBStore(srv.db)
	case "memory":
		msgStore = msgstore.NewMemoryStore()
	}

	return &user{
		User:     *record,
		srv:      srv,
		logger:   logger,
		events:   make(chan event, 64),
		done:     make(chan struct{}),
		msgStore: msgStore,
	}
}
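
// Which message store a user gets is selected by the server-wide log driver
// setting handled in newUser above. A rough sketch of the mapping (the exact
// configuration syntax may differ between soju versions):
//
//	message-store fs /var/lib/soju/logs  ->  msgstore.NewFSStore
//	message-store db                     ->  msgstore.NewDBStore
//	message-store memory                 ->  msgstore.NewMemoryStore
//
// Any other driver value leaves msgStore nil, and history-related features
// are then skipped (see the nil checks on msgStore elsewhere in this file).
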
func (u *user) forEachUpstream(f func(uc *upstreamConn)) {
	for _, network := range u.networks {
		if network.conn == nil {
			continue
		}
		f(network.conn)
	}
}

func (u *user) getNetwork(name string) *network {
	for _, network := range u.networks {
		if network.Addr == name {
			return network
		}
		if network.Name != "" && network.Name == name {
			return network
		}
	}
	return nil
}

func (u *user) getNetworkByID(id int64) *network {
	for _, net := range u.networks {
		if net.ID == id {
			return net
		}
	}
	return nil
}
2020-03-16 10:18:41 +00:00
|
|
|
func (u *user) run() {
|
2020-10-25 10:13:51 +00:00
|
|
|
defer func() {
|
|
|
|
if u.msgStore != nil {
|
|
|
|
if err := u.msgStore.Close(); err != nil {
|
2021-04-13 11:04:23 +00:00
|
|
|
u.logger.Printf("failed to close message store for user %q: %v", u.Username, err)
|
2020-10-25 10:13:51 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
close(u.done)
|
|
|
|
}()
|
2020-08-07 13:30:05 +00:00
|
|
|
|
2021-10-18 17:15:15 +00:00
|
|
|
networks, err := u.srv.db.ListNetworks(context.TODO(), u.ID)
|
2020-03-16 10:18:41 +00:00
|
|
|
if err != nil {
|
2021-04-13 11:04:23 +00:00
|
|
|
u.logger.Printf("failed to list networks for user %q: %v", u.Username, err)
|
2020-03-16 10:18:41 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2022-02-04 14:03:13 +00:00
|
|
|
sort.Slice(networks, func(i, j int) bool {
|
|
|
|
return networks[i].ID < networks[j].ID
|
|
|
|
})
|
|
|
|
|
2020-03-16 10:18:41 +00:00
|
|
|
for _, record := range networks {
|
2020-05-01 00:02:41 +00:00
|
|
|
record := record
|
2021-10-18 17:15:15 +00:00
|
|
|
channels, err := u.srv.db.ListChannels(context.TODO(), record.ID)
|
2020-04-11 15:00:40 +00:00
|
|
|
if err != nil {
|
2021-04-13 11:04:23 +00:00
|
|
|
u.logger.Printf("failed to list channels for user %q, network %q: %v", u.Username, record.GetName(), err)
|
2020-07-09 12:20:23 +00:00
|
|
|
continue
|
2020-04-11 15:00:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
network := newNetwork(u, &record, channels)
|
2020-03-16 10:18:41 +00:00
|
|
|
u.networks = append(u.networks, network)
|
|
|
|
|
2021-02-10 17:16:08 +00:00
|
|
|
if u.hasPersistentMsgStore() {
|
2021-10-18 17:15:15 +00:00
|
|
|
receipts, err := u.srv.db.ListDeliveryReceipts(context.TODO(), record.ID)
|
2021-02-10 17:16:08 +00:00
|
|
|
if err != nil {
|
2021-04-13 11:04:23 +00:00
|
|
|
u.logger.Printf("failed to load delivery receipts for user %q, network %q: %v", u.Username, network.GetName(), err)
|
2021-02-10 17:16:08 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, rcpt := range receipts {
|
|
|
|
network.delivered.StoreID(rcpt.Target, rcpt.Client, rcpt.InternalMsgID)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-03-16 10:18:41 +00:00
|
|
|
go network.run()
|
|
|
|
}
|
2020-03-16 11:44:59 +00:00
|
|
|
|
2020-03-27 15:33:19 +00:00
|
|
|
for e := range u.events {
|
|
|
|
switch e := e.(type) {
|
2020-04-01 10:05:25 +00:00
|
|
|
case eventUpstreamConnected:
|
2020-04-01 10:16:32 +00:00
|
|
|
uc := e.uc
|
2020-04-01 10:21:31 +00:00
|
|
|
|
|
|
|
uc.network.conn = uc
|
|
|
|
|
2020-04-01 10:16:32 +00:00
|
|
|
uc.updateAway()
|
2021-11-09 16:59:43 +00:00
|
|
|
uc.updateMonitor()
|
2020-04-04 02:48:25 +00:00
|
|
|
|
|
|
|
uc.forEachDownstream(func(dc *downstreamConn) {
|
2020-04-29 14:28:33 +00:00
|
|
|
dc.updateSupportedCaps()
|
2021-01-22 19:55:53 +00:00
|
|
|
|
2022-03-14 18:15:35 +00:00
|
|
|
if !dc.caps.IsEnabled("soju.im/bouncer-networks") {
|
2021-05-26 09:04:33 +00:00
|
|
|
sendServiceNOTICE(dc, fmt.Sprintf("connected to %s", uc.network.GetName()))
|
|
|
|
}
|
|
|
|
|
|
|
|
dc.updateNick()
|
2022-03-21 15:09:45 +00:00
|
|
|
dc.updateHost()
|
2021-05-26 09:04:33 +00:00
|
|
|
dc.updateRealname()
|
2021-11-19 18:21:48 +00:00
|
|
|
dc.updateAccount()
|
2021-05-26 09:04:33 +00:00
|
|
|
})
|
2022-04-15 08:41:38 +00:00
|
|
|
u.notifyBouncerNetworkState(uc.network.ID, irc.Tags{
|
|
|
|
"state": "connected",
|
|
|
|
"error": "",
|
|
|
|
})
|
2020-04-04 02:48:25 +00:00
|
|
|
uc.network.lastError = nil
|
2020-03-27 23:51:58 +00:00
|
|
|
		case eventUpstreamDisconnected:
			u.handleUpstreamDisconnected(e.uc)
		case eventUpstreamConnectionError:
			net := e.net

			stopped := false
			select {
			case <-net.stopped:
				stopped = true
			default:
			}

			if !stopped && (net.lastError == nil || net.lastError.Error() != e.err.Error()) {
				net.forEachDownstream(func(dc *downstreamConn) {
					sendServiceNOTICE(dc, fmt.Sprintf("failed connecting/registering to %s: %v", net.GetName(), e.err))
				})
			}
			net.lastError = e.err
			u.notifyBouncerNetworkState(net.ID, irc.Tags{
				"error": net.lastError.Error(),
			})
		case eventUpstreamError:
			uc := e.uc

			uc.forEachDownstream(func(dc *downstreamConn) {
				sendServiceNOTICE(dc, fmt.Sprintf("disconnected from %s: %v", uc.network.GetName(), e.err))
			})
			uc.network.lastError = e.err
			u.notifyBouncerNetworkState(uc.network.ID, irc.Tags{
				"error": uc.network.lastError.Error(),
			})
		case eventUpstreamMessage:
			msg, uc := e.msg, e.uc
			if uc.isClosed() {
				uc.logger.Printf("ignoring message on closed connection: %v", msg)
				break
			}
			if err := uc.handleMessage(context.TODO(), msg); err != nil {
				uc.logger.Printf("failed to handle message %q: %v", msg, err)
			}
		case eventChannelDetach:
			uc, name := e.uc, e.name
			c := uc.network.channels.Get(name)
			if c == nil || c.Detached {
				continue
			}
			uc.network.detach(c)
			if err := uc.srv.db.StoreChannel(context.TODO(), uc.network.ID, c); err != nil {
				u.logger.Printf("failed to store updated detached channel %q: %v", c.Name, err)
			}
		case eventDownstreamConnected:
			dc := e.dc
			ctx := context.TODO()

			if dc.network != nil {
				dc.monitored.SetCasemapping(dc.network.casemap)
			}

			if !u.Enabled && u.srv.Config().EnableUsersOnAuth {
				record := u.User
				record.Enabled = true
				if err := u.updateUser(ctx, &record); err != nil {
					dc.logger.Printf("failed to enable user after successful authentication: %v", err)
				}
			}

			if !u.Enabled {
				dc.SendMessage(&irc.Message{
					Command: "ERROR",
					Params:  []string{"This bouncer account is disabled"},
				})
				// TODO: close dc after the error message is sent
				break
			}

			if err := dc.welcome(ctx); err != nil {
				if ircErr, ok := err.(ircError); ok {
					msg := ircErr.Message.Copy()
					msg.Prefix = dc.srv.prefix()
					dc.SendMessage(msg)
				} else {
					dc.SendMessage(&irc.Message{
						Command: "ERROR",
						Params:  []string{"Internal server error"},
					})
				}
				dc.logger.Printf("failed to handle new registered connection: %v", err)
				// TODO: close dc after the error message is sent
				break
			}

			u.downstreamConns = append(u.downstreamConns, dc)
			u.numDownstreamConns.Add(1)

			dc.forEachNetwork(func(network *network) {
				if network.lastError != nil {
					sendServiceNOTICE(dc, fmt.Sprintf("disconnected from %s: %v", network.GetName(), network.lastError))
				}
			})

			u.forEachUpstream(func(uc *upstreamConn) {
				uc.updateAway()
			})

			u.bumpDownstreamInteractionTime(ctx)
		case eventDownstreamDisconnected:
			dc := e.dc
			ctx := context.TODO()

			for i := range u.downstreamConns {
				if u.downstreamConns[i] == dc {
					u.downstreamConns = append(u.downstreamConns[:i], u.downstreamConns[i+1:]...)
					u.numDownstreamConns.Add(-1)
					break
				}
			}

			dc.forEachNetwork(func(net *network) {
				net.storeClientDeliveryReceipts(ctx, dc.clientName)
			})

			u.forEachUpstream(func(uc *upstreamConn) {
				uc.cancelPendingCommandsByDownstreamID(dc.id)
				uc.updateAway()
				uc.updateMonitor()
			})

			u.bumpDownstreamInteractionTime(ctx)
		case eventDownstreamMessage:
			msg, dc := e.msg, e.dc
			if dc.isClosed() {
				dc.logger.Printf("ignoring message on closed connection: %v", msg)
				break
			}
			err := dc.handleMessage(context.TODO(), msg)
			if ircErr, ok := err.(ircError); ok {
				ircErr.Message.Prefix = dc.srv.prefix()
				dc.SendMessage(ircErr.Message)
			} else if err != nil {
				dc.logger.Printf("failed to handle message %q: %v", msg, err)
				dc.Close()
			}
		case eventBroadcast:
			msg := e.msg
			for _, dc := range u.downstreamConns {
				dc.SendMessage(msg)
			}
		case eventUserUpdate:
			// copy the user record because we'll mutate it
			record := u.User

			if e.password != nil {
				record.Password = *e.password
			}
			if e.admin != nil {
				record.Admin = *e.admin
			}
			if e.enabled != nil {
				record.Enabled = *e.enabled
			}

			e.done <- u.updateUser(context.TODO(), &record)

			// If the password was updated, kill all downstream connections to
			// force them to re-authenticate with the new credentials.
			if e.password != nil {
				for _, dc := range u.downstreamConns {
					dc.Close()
				}
			}
		case eventTryRegainNick:
			e.uc.tryRegainNick(e.nick)
		case eventUserRun:
			ctx := context.TODO()
			err := handleServiceCommand(&serviceContext{
				Context: ctx,
				user:    u,
				srv:     u.srv,
				admin:   u.Admin,
				print: func(text string) {
					// Avoid blocking on e.print in case our context is canceled.
					// This is a no-op right now because we use context.TODO(),
					// but might be useful later when we add timeouts.
					select {
					case <-ctx.Done():
					case e.print <- text:
					}
				},
			}, e.params)
			select {
			case <-ctx.Done():
			case e.ret <- err:
			}
		case eventStop:
			for _, dc := range u.downstreamConns {
				dc.Close()
			}
			for _, n := range u.networks {
				n.stop()

				n.delivered.ForEachClient(func(clientName string) {
					n.storeClientDeliveryReceipts(context.TODO(), clientName)
				})
			}
			return
		default:
			panic(fmt.Sprintf("received unknown event type: %T", e))
		}
	}
}
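
// handleUpstreamDisconnected performs the bookkeeping needed when an upstream
// connection goes away: it clears the network's active connection, cancels
// nick-regain and auto-detach timers, aborts pending commands, refreshes
// downstream capabilities and, unless the network has been removed in the
// meantime, notifies downstreams about the disconnection.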
func (u *user) handleUpstreamDisconnected(uc *upstreamConn) {
	uc.network.conn = nil

	uc.stopRegainNickTimer()
	uc.abortPendingCommands()

	uc.channels.ForEach(func(uch *upstreamChannel) {
		uch.updateAutoDetach(0)
	})

	uc.forEachDownstream(func(dc *downstreamConn) {
		dc.updateSupportedCaps()
	})

	// If the network has been removed, don't send a state change notification
	found := false
	for _, net := range u.networks {
		if net == uc.network {
			found = true
			break
		}
	}
	if !found {
		return
	}

	u.notifyBouncerNetworkState(uc.network.ID, irc.Tags{"state": "disconnected"})

	if uc.network.lastError == nil {
		uc.forEachDownstream(func(dc *downstreamConn) {
			if !dc.caps.IsEnabled("soju.im/bouncer-networks") {
				sendServiceNOTICE(dc, fmt.Sprintf("disconnected from %s", uc.network.GetName()))
			}
		})
	}
}
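
// notifyBouncerNetworkState broadcasts a BOUNCER NETWORK message carrying the
// given attributes to every downstream connection that has enabled the
// soju.im/bouncer-networks-notify capability.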
func (u *user) notifyBouncerNetworkState(netID int64, attrs irc.Tags) {
	netIDStr := fmt.Sprintf("%v", netID)
	for _, dc := range u.downstreamConns {
		if dc.caps.IsEnabled("soju.im/bouncer-networks-notify") {
			dc.SendMessage(&irc.Message{
				Prefix:  dc.srv.prefix(),
				Command: "BOUNCER",
				Params:  []string{"NETWORK", netIDStr, attrs.String()},
			})
		}
	}
}
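
// addNetwork registers the network with the user, keeps the network list
// sorted by ID and starts the network goroutine.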
func (u *user) addNetwork(network *network) {
	u.networks = append(u.networks, network)

	sort.Slice(u.networks, func(i, j int) bool {
		return u.networks[i].ID < u.networks[j].ID
	})

	go network.run()
}
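
// removeNetwork stops the network, closes downstream connections bound to it
// and drops it from the user's network list. It panics if the network is
// unknown.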
func (u *user) removeNetwork(network *network) {
	network.stop()

	for _, dc := range u.downstreamConns {
		if dc.network != nil && dc.network == network {
			dc.Close()
		}
	}

	for i, net := range u.networks {
		if net == network {
			u.networks = append(u.networks[:i], u.networks[i+1:]...)
			return
		}
	}

	panic("tried to remove a non-existing network")
}
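
// checkNetwork validates a network record before it is stored: the URL must be
// well-formed for a supported scheme (for example "ircs://irc.libera.chat" or
// "irc+unix:///run/ircd.sock") and the network name must be non-empty, must
// not start with a dash and must be unique for this user.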
func (u *user) checkNetwork(record *database.Network) error {
	url, err := record.URL()
	if err != nil {
		return err
	}
	if url.User != nil {
		return fmt.Errorf("%v:// URL must not have username and password information", url.Scheme)
	}
	if url.RawQuery != "" {
		return fmt.Errorf("%v:// URL must not have query values", url.Scheme)
	}
	if url.Fragment != "" {
		return fmt.Errorf("%v:// URL must not have a fragment", url.Scheme)
	}
	switch url.Scheme {
	case "ircs", "irc+insecure":
		if url.Host == "" {
			return fmt.Errorf("%v:// URL must have a host", url.Scheme)
		}
		if url.Path != "" {
			return fmt.Errorf("%v:// URL must not have a path", url.Scheme)
		}
	case "irc+unix", "unix":
		if url.Host != "" {
			return fmt.Errorf("%v:// URL must not have a host", url.Scheme)
		}
		if url.Path == "" {
			return fmt.Errorf("%v:// URL must have a path", url.Scheme)
		}
	default:
		return fmt.Errorf("unknown URL scheme %q", url.Scheme)
	}

	if record.GetName() == "" {
		return fmt.Errorf("network name cannot be empty")
	}
	if strings.HasPrefix(record.GetName(), "-") {
		// Can be mixed up with flags when sending commands to the service
		return fmt.Errorf("network name cannot start with a dash character")
	}

	for _, net := range u.networks {
		if net.GetName() == record.GetName() && net.ID != record.ID {
			return fmt.Errorf("a network with the name %q already exists", record.GetName())
		}
	}

	return nil
}
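
// createNetwork validates and stores a new network record, then starts the
// network and notifies downstreams about it. The record must not have an ID
// assigned yet.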
func (u *user) createNetwork(ctx context.Context, record *database.Network) (*network, error) {
	if record.ID != 0 {
		panic("tried creating an already-existing network")
	}

	if err := u.checkNetwork(record); err != nil {
		return nil, err
	}

	if max := u.srv.Config().MaxUserNetworks; max >= 0 && len(u.networks) >= max {
		return nil, fmt.Errorf("maximum number of networks reached")
	}

	network := newNetwork(u, record, nil)
	err := u.srv.db.StoreNetwork(ctx, u.ID, &network.Network)
	if err != nil {
		return nil, err
	}

	u.addNetwork(network)

	attrs := getNetworkAttrs(network)
	u.notifyBouncerNetworkState(network.ID, attrs)

	return network, nil
}
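
// updateNetwork stores an updated network record and replaces the running
// network with a fresh one built from it, re-connecting to the upstream server
// and patching downstream connections so that they survive the update.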
func (u *user) updateNetwork(ctx context.Context, record *database.Network) (*network, error) {
	if record.ID == 0 {
		panic("tried updating a new network")
	}

	// If the nickname/realname is reset to the default, just wipe the
	// per-network setting
	if record.Nick == u.Nick {
		record.Nick = ""
	}
	if record.Realname == u.Realname {
		record.Realname = ""
	}

	if err := u.checkNetwork(record); err != nil {
		return nil, err
	}

	network := u.getNetworkByID(record.ID)
	if network == nil {
		panic("tried updating a non-existing network")
	}

	if err := u.srv.db.StoreNetwork(ctx, u.ID, record); err != nil {
		return nil, err
	}

	// Most network changes require us to re-connect to the upstream server

	channels := make([]database.Channel, 0, network.channels.Len())
	network.channels.ForEach(func(ch *database.Channel) {
		channels = append(channels, *ch)
	})

	updatedNetwork := newNetwork(u, record, channels)

	// If we're currently connected, disconnect and perform the necessary
	// bookkeeping
	if network.conn != nil {
		network.stop()
		// Note: this will set network.conn to nil
		u.handleUpstreamDisconnected(network.conn)
	}

	// Patch downstream connections to use our fresh updated network
	for _, dc := range u.downstreamConns {
		if dc.network != nil && dc.network == network {
			dc.network = updatedNetwork
		}
	}

	// We need to remove the network after patching downstream connections,
	// otherwise they'll get closed
	u.removeNetwork(network)

	// The filesystem message store needs to be notified whenever the network
	// is renamed
	renameNetMsgStore, ok := u.msgStore.(msgstore.RenameNetworkStore)
	if ok && updatedNetwork.GetName() != network.GetName() {
		if err := renameNetMsgStore.RenameNetwork(&network.Network, &updatedNetwork.Network); err != nil {
			network.logger.Printf("failed to update message store network name to %q: %v", updatedNetwork.GetName(), err)
		}
	}

	// This will re-connect to the upstream server
	u.addNetwork(updatedNetwork)

	// TODO: only broadcast attributes that have changed
	attrs := getNetworkAttrs(updatedNetwork)
	u.notifyBouncerNetworkState(updatedNetwork.ID, attrs)

	return updatedNetwork, nil
}
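
// deleteNetwork removes the network with the given ID from the database and
// from the user, and announces the removal to downstreams that negotiated
// soju.im/bouncer-networks-notify.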
func (u *user) deleteNetwork(ctx context.Context, id int64) error {
	network := u.getNetworkByID(id)
	if network == nil {
		panic("tried deleting a non-existing network")
	}

	if err := u.srv.db.DeleteNetwork(ctx, network.ID); err != nil {
		return err
	}

	u.removeNetwork(network)

	idStr := fmt.Sprintf("%v", network.ID)
	for _, dc := range u.downstreamConns {
		if dc.caps.IsEnabled("soju.im/bouncer-networks-notify") {
			dc.SendMessage(&irc.Message{
				Prefix:  dc.srv.prefix(),
				Command: "BOUNCER",
				Params:  []string{"NETWORK", idStr, "*"},
			})
		}
	}

	return nil
}
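
// updateUser persists an updated user record and applies the changes to live
// connections: nick changes are forwarded to upstreams, realname and enabled
// changes re-connect the networks that need it, and disabling the user closes
// all downstream connections.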
func (u *user) updateUser(ctx context.Context, record *database.User) error {
	if u.ID != record.ID {
		panic("ID mismatch when updating user")
	}

	nickUpdated := u.Nick != record.Nick
	realnameUpdated := u.Realname != record.Realname
	enabledUpdated := u.Enabled != record.Enabled
	if err := u.srv.db.StoreUser(ctx, record); err != nil {
		return fmt.Errorf("failed to update user %q: %v", u.Username, err)
	}
	u.User = *record

	if nickUpdated {
		for _, net := range u.networks {
			if net.Nick != "" {
				continue
			}

			if uc := net.conn; uc != nil {
				uc.SendMessage(ctx, &irc.Message{
					Command: "NICK",
					Params:  []string{database.GetNick(&u.User, &net.Network)},
				})
			}
		}
	}

	if realnameUpdated || enabledUpdated {
		// Re-connect to networks which use the default realname
		var needUpdate []database.Network
		for _, net := range u.networks {
			// If only the realname was updated, maybe we can skip the
			// re-connect
			if realnameUpdated && !enabledUpdated {
				// If this network has a custom realname set, no need to
				// re-connect: the user-wide realname remains unused
				if net.Realname != "" {
					continue
				}

				// We only need to call updateNetwork for upstreams that don't
				// support setname
				if uc := net.conn; uc != nil && uc.caps.IsEnabled("setname") {
					uc.SendMessage(ctx, &irc.Message{
						Command: "SETNAME",
						Params:  []string{database.GetRealname(&u.User, &net.Network)},
					})
					continue
				}
			}

			needUpdate = append(needUpdate, net.Network)
		}

		var netErr error
		for _, net := range needUpdate {
			if _, err := u.updateNetwork(ctx, &net); err != nil {
				netErr = err
			}
		}
		if netErr != nil {
			return netErr
		}
	}

	if !u.Enabled {
		// TODO: send an error message before disconnecting
		for _, dc := range u.downstreamConns {
			dc.Close()
		}
	}

	return nil
}
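
// stop asks the user goroutine to terminate and waits until it has exited, or
// returns early if the context is cancelled.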
func (u *user) stop(ctx context.Context) error {
	select {
	case <-u.done:
		return nil // already stopped
	case u.events <- eventStop{}:
		// we've requested to stop, let's wait for the user goroutine to exit
	case <-ctx.Done():
		return ctx.Err()
	}

	select {
	case <-u.done:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}
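
// hasPersistentMsgStore reports whether messages stored for this user survive
// a soju restart.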
func (u *user) hasPersistentMsgStore() bool {
	if u.msgStore == nil {
		return false
	}
	return !msgstore.IsMemoryStore(u.msgStore)
}
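
// FormatServerTime formats a timestamp for the IRC server-time extension,
// truncating it to the precision actually retained by the message store.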
func (u *user) FormatServerTime(t time.Time) string {
	if u.msgStore != nil && msgstore.IsFSStore(u.msgStore) {
		// The FS message store truncates message timestamps to the second,
		// so truncate them here to get consistent timestamps.
		t = t.Truncate(time.Second)
	}
	return xirc.FormatServerTime(t)
}

// localTCPAddrForHost returns the local address to use when connecting to host.
// A nil address is returned when the OS should automatically pick one.
func (u *user) localTCPAddrForHost(ctx context.Context, host string) (*net.TCPAddr, error) {
	upstreamUserIPs := u.srv.Config().UpstreamUserIPs
	if len(upstreamUserIPs) == 0 {
		return nil, nil
	}

	ips, err := net.DefaultResolver.LookupIP(ctx, "ip", host)
	if err != nil {
		return nil, err
	}

	wantIPv6 := false
	for _, ip := range ips {
		if ip.To4() == nil {
			wantIPv6 = true
			break
		}
	}

	var ipNet *net.IPNet
	for _, in := range upstreamUserIPs {
		if wantIPv6 == (in.IP.To4() == nil) {
			ipNet = in
			break
		}
	}
	if ipNet == nil {
		return nil, nil
	}

	var ipInt big.Int
	ipInt.SetBytes(ipNet.IP)
	ipInt.Add(&ipInt, big.NewInt(u.ID+1))
	ip := net.IP(ipInt.Bytes())
	if !ipNet.Contains(ip) {
		return nil, fmt.Errorf("IP network %v too small", ipNet)
	}

	return &net.TCPAddr{IP: ip}, nil
}
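
// bumpDownstreamInteractionTime records the current time as the last moment a
// downstream client interacted with this user and persists it.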
func (u *user) bumpDownstreamInteractionTime(ctx context.Context) {
	record := u.User
	record.DownstreamInteractedAt = time.Now()
	if err := u.updateUser(ctx, &record); err != nil {
		u.logger.Printf("failed to bump downstream interaction time: %v", err)
	}
}