soju/user.go

package soju

import (
	"fmt"
	"time"

	"gopkg.in/irc.v3"
)
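// event is a value processed by the user goroutine's event loop (see
// user.run). The concrete event types below describe upstream and downstream
// connection lifecycle changes and incoming messages.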
type event interface{}

type eventUpstreamMessage struct {
	msg *irc.Message
	uc  *upstreamConn
}

type eventUpstreamConnectionError struct {
	net *network
	err error
}

type eventUpstreamConnected struct {
	uc *upstreamConn
}

type eventUpstreamDisconnected struct {
	uc *upstreamConn
}

type eventUpstreamError struct {
	uc  *upstreamConn
	err error
}

type eventDownstreamMessage struct {
	msg *irc.Message
	dc  *downstreamConn
}

type eventDownstreamConnected struct {
	dc *downstreamConn
}

type eventDownstreamDisconnected struct {
	dc *downstreamConn
}
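// networkHistory stores the message ring buffer for a single entity, along
// with the position in the ring of each client that is currently offline.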
type networkHistory struct {
	offlineClients map[string]uint64 // indexed by client name
	ring           *Ring             // can be nil if there are no offline clients
}
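// network is a single upstream IRC network for a user. It embeds the Network
// database record and tracks the live upstream connection, the joined
// channels, per-entity history and the last connection error.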
type network struct {
	Network
	user    *user
	stopped chan struct{}

	conn           *upstreamConn
	channels       map[string]*Channel
	history        map[string]*networkHistory // indexed by entity
	offlineClients map[string]struct{}        // indexed by client name
	lastError      error
}
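// newNetwork builds an in-memory network from its database record and the
// channels stored for it.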
func newNetwork(user *user, record *Network, channels []Channel) *network {
	m := make(map[string]*Channel, len(channels))
	for _, ch := range channels {
		ch := ch // copy the iteration variable so each map entry points to a distinct value
		m[ch.Name] = &ch
	}

	return &network{
		Network:        *record,
		user:           user,
		stopped:        make(chan struct{}),
		channels:       m,
		history:        make(map[string]*networkHistory),
		offlineClients: make(map[string]struct{}),
	}
}
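// forEachDownstream calls f for each downstream connection attached to this
// network, including downstreams that are not bound to any specific network.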
func (net *network) forEachDownstream(f func(*downstreamConn)) {
	net.user.forEachDownstream(func(dc *downstreamConn) {
		if dc.network != nil && dc.network != net {
			return
		}
		f(dc)
	})
}
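// isStopped reports whether stop has been called on this network.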
func (net *network) isStopped() bool {
	select {
	case <-net.stopped:
		return true
	default:
		return false
	}
}
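// run is the connection loop for this network: it dials the upstream server,
// registers, forwards messages to the user goroutine as events and reconnects
// (with a minimum delay between attempts) until the network is stopped.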
func (net *network) run() {
	var lastTry time.Time
	for {
		if net.isStopped() {
			return
		}

		if dur := time.Now().Sub(lastTry); dur < retryConnectMinDelay {
			delay := retryConnectMinDelay - dur
			net.user.srv.Logger.Printf("waiting %v before trying to reconnect to %q", delay.Truncate(time.Second), net.Addr)
			time.Sleep(delay)
		}
		lastTry = time.Now()

		uc, err := connectToUpstream(net)
		if err != nil {
			net.user.srv.Logger.Printf("failed to connect to upstream server %q: %v", net.Addr, err)
			net.user.events <- eventUpstreamConnectionError{net, fmt.Errorf("failed to connect: %v", err)}
			continue
		}

		uc.register()
		if err := uc.runUntilRegistered(); err != nil {
			uc.logger.Printf("failed to register: %v", err)
			net.user.events <- eventUpstreamConnectionError{net, fmt.Errorf("failed to register: %v", err)}
			uc.Close()
			continue
		}

		// TODO: this is racy with net.stopped. If the network is stopped
		// before the user goroutine receives eventUpstreamConnected, the
		// connection won't be closed.
		net.user.events <- eventUpstreamConnected{uc}

		if err := uc.readMessages(net.user.events); err != nil {
			uc.logger.Printf("failed to handle messages: %v", err)
			net.user.events <- eventUpstreamError{uc, fmt.Errorf("failed to handle messages: %v", err)}
		}
		uc.Close()
		net.user.events <- eventUpstreamDisconnected{uc}
	}
}
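// stop closes the stopped channel (at most once) and closes the current
// upstream connection, if any.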
func (net *network) stop() {
	if !net.isStopped() {
		close(net.stopped)
	}

	if net.conn != nil {
		net.conn.Close()
	}
}
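// createUpdateChannel stores the channel in the database, creating it if it
// does not exist yet. If the Detached flag changed, synthetic PART or JOIN
// messages are sent to the relevant downstreams, and history is replayed on
// re-attach.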
func (net *network) createUpdateChannel(ch *Channel) error {
	if current, ok := net.channels[ch.Name]; ok {
		ch.ID = current.ID // update channel if it already exists
	}
	if err := net.user.srv.db.StoreChannel(net.ID, ch); err != nil {
		return err
	}
	prev := net.channels[ch.Name]
	net.channels[ch.Name] = ch

	if prev != nil && prev.Detached != ch.Detached {
		history := net.history[ch.Name]
		if ch.Detached {
			net.user.srv.Logger.Printf("network %q: detaching channel %q", net.GetName(), ch.Name)
			net.forEachDownstream(func(dc *downstreamConn) {
				net.offlineClients[dc.clientName] = struct{}{}
				if history != nil {
					history.offlineClients[dc.clientName] = history.ring.Cur()
				}

				dc.SendMessage(&irc.Message{
					Prefix:  dc.prefix(),
					Command: "PART",
					Params:  []string{dc.marshalEntity(net, ch.Name), "Detach"},
				})
			})
		} else {
			net.user.srv.Logger.Printf("network %q: attaching channel %q", net.GetName(), ch.Name)

			var uch *upstreamChannel
			if net.conn != nil {
				uch = net.conn.channels[ch.Name]
			}

			net.forEachDownstream(func(dc *downstreamConn) {
				dc.SendMessage(&irc.Message{
					Prefix:  dc.prefix(),
					Command: "JOIN",
					Params:  []string{dc.marshalEntity(net, ch.Name)},
				})

				if uch != nil {
					forwardChannel(dc, uch)
				}

				if history != nil {
					dc.sendNetworkHistory(net)
				}
			})
		}
	}

	return nil
}
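// deleteChannel removes the channel from the database and from the in-memory
// channel map.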
func (net *network) deleteChannel(name string) error {
	if err := net.user.srv.db.DeleteChannel(net.ID, name); err != nil {
		return err
	}
	delete(net.channels, name)
	return nil
}
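// user holds the state of a single bouncer user: the networks it is connected
// to, the attached downstream connections and the event channel consumed by
// the user goroutine.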
type user struct {
	User
	srv    *Server
	events chan event

	networks        []*network
	downstreamConns []*downstreamConn

	// LIST commands in progress
	pendingLISTs []pendingLIST
}
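// pendingLIST tracks a single downstream LIST request that is still in
// progress. Replies from all upstreams are merged, and a single RPL_LISTEND
// is sent downstream once every upstream has completed or has been removed
// (e.g. because it disconnected).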
type pendingLIST struct {
	downstreamID uint64
	// list of per-upstream LIST commands not yet sent or completed
	pendingCommands map[int64]*irc.Message
}
func newUser(srv *Server, record *User) *user {
	return &user{
		User:   *record,
		srv:    srv,
		events: make(chan event, 64),
	}
}
func (u *user) forEachNetwork(f func(*network)) {
	for _, network := range u.networks {
		f(network)
	}
}

func (u *user) forEachUpstream(f func(uc *upstreamConn)) {
	for _, network := range u.networks {
		if network.conn == nil {
			continue
		}
		f(network.conn)
	}
}
func (u *user) forEachDownstream(f func(dc *downstreamConn)) {
	for _, dc := range u.downstreamConns {
		f(dc)
	}
}
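// getNetwork returns the network matching the given address or name, or nil
// if there is none.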
func (u *user) getNetwork(name string) *network {
	for _, network := range u.networks {
		if network.Addr == name {
			return network
		}
		if network.Name != "" && network.Name == name {
			return network
		}
	}
	return nil
}
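// run is the user goroutine: it loads the user's networks from the database,
// starts a connection goroutine for each of them, then processes events from
// upstream and downstream connections.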
func (u *user) run() {
	networks, err := u.srv.db.ListNetworks(u.Username)
	if err != nil {
		u.srv.Logger.Printf("failed to list networks for user %q: %v", u.Username, err)
		return
	}

	for _, record := range networks {
		record := record // copy the iteration variable before taking its address
		channels, err := u.srv.db.ListChannels(record.ID)
		if err != nil {
			u.srv.Logger.Printf("failed to list channels for user %q, network %q: %v", u.Username, record.GetName(), err)
		}

		network := newNetwork(u, &record, channels)
		u.networks = append(u.networks, network)

		go network.run()
	}
	for e := range u.events {
		switch e := e.(type) {
		case eventUpstreamConnected:
			uc := e.uc

			uc.network.conn = uc

			uc.updateAway()

			uc.forEachDownstream(func(dc *downstreamConn) {
				dc.updateSupportedCaps()
				sendServiceNOTICE(dc, fmt.Sprintf("connected to %s", uc.network.GetName()))

				dc.updateNick()
			})
			uc.network.lastError = nil
		case eventUpstreamDisconnected:
			uc := e.uc

			uc.network.conn = nil

			for _, ml := range uc.messageLoggers {
				if err := ml.Close(); err != nil {
					uc.logger.Printf("failed to close message logger: %v", err)
				}
			}

			uc.endPendingLISTs(true)

			uc.forEachDownstream(func(dc *downstreamConn) {
				dc.updateSupportedCaps()
			})

			if uc.network.lastError == nil {
				uc.forEachDownstream(func(dc *downstreamConn) {
					sendServiceNOTICE(dc, fmt.Sprintf("disconnected from %s", uc.network.GetName()))
				})
			}
		case eventUpstreamConnectionError:
			net := e.net

			if net.lastError == nil || net.lastError.Error() != e.err.Error() {
				net.forEachDownstream(func(dc *downstreamConn) {
					sendServiceNOTICE(dc, fmt.Sprintf("failed connecting/registering to %s: %v", net.GetName(), e.err))
				})
			}
			net.lastError = e.err
		case eventUpstreamError:
			uc := e.uc

			uc.forEachDownstream(func(dc *downstreamConn) {
				sendServiceNOTICE(dc, fmt.Sprintf("disconnected from %s: %v", uc.network.GetName(), e.err))
			})
			uc.network.lastError = e.err
		case eventUpstreamMessage:
			msg, uc := e.msg, e.uc
			if uc.isClosed() {
				uc.logger.Printf("ignoring message on closed connection: %v", msg)
				break
			}
			if err := uc.handleMessage(msg); err != nil {
				uc.logger.Printf("failed to handle message %q: %v", msg, err)
			}
		case eventDownstreamConnected:
			dc := e.dc

			if err := dc.welcome(); err != nil {
				dc.logger.Printf("failed to handle new registered connection: %v", err)
				break
			}

			u.downstreamConns = append(u.downstreamConns, dc)

			u.forEachUpstream(func(uc *upstreamConn) {
				uc.updateAway()
			})

			dc.updateSupportedCaps()
		case eventDownstreamDisconnected:
			dc := e.dc

			for i := range u.downstreamConns {
				if u.downstreamConns[i] == dc {
					u.downstreamConns = append(u.downstreamConns[:i], u.downstreamConns[i+1:]...)
					break
				}
			}

			// Save history if we're the last client with this name
			skipHistory := make(map[*network]bool)
			u.forEachDownstream(func(conn *downstreamConn) {
				if dc.clientName == conn.clientName {
					skipHistory[conn.network] = true
				}
			})

			dc.forEachNetwork(func(net *network) {
				if skipHistory[net] || skipHistory[nil] {
					return
				}

				net.offlineClients[dc.clientName] = struct{}{}
				for target, history := range net.history {
					if ch, ok := net.channels[target]; ok && ch.Detached {
						continue
					}
					history.offlineClients[dc.clientName] = history.ring.Cur()
				}
			})

			u.forEachUpstream(func(uc *upstreamConn) {
				uc.updateAway()
			})
		case eventDownstreamMessage:
			msg, dc := e.msg, e.dc
			if dc.isClosed() {
				dc.logger.Printf("ignoring message on closed connection: %v", msg)
				break
			}
			err := dc.handleMessage(msg)
			if ircErr, ok := err.(ircError); ok {
				ircErr.Message.Prefix = dc.srv.prefix()
				dc.SendMessage(ircErr.Message)
			} else if err != nil {
				dc.logger.Printf("failed to handle message %q: %v", msg, err)
				dc.Close()
			}
		default:
			u.srv.Logger.Printf("received unknown event type: %T", e)
		}
	}
}
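// createNetwork persists a new network for the user and starts its connection
// goroutine. It panics if the record already has an ID.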
func (u *user) createNetwork(net *Network) (*network, error) {
	if net.ID != 0 {
		panic("tried creating an already-existing network")
	}

	network := newNetwork(u, net, nil)
	err := u.srv.db.StoreNetwork(u.Username, &network.Network)
	if err != nil {
		return nil, err
	}

	u.networks = append(u.networks, network)

	go network.run()
	return network, nil
}
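// deleteNetwork removes the network from the database, closes the downstream
// connections bound to it, stops its connection goroutine and drops it from
// the user's network list. It panics if no network has the given ID.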
func (u *user) deleteNetwork(id int64) error {
	for i, net := range u.networks {
		if net.ID != id {
			continue
		}

		if err := u.srv.db.DeleteNetwork(net.ID); err != nil {
			return err
		}

		u.forEachDownstream(func(dc *downstreamConn) {
			if dc.network != nil && dc.network == net {
				dc.Close()
			}
		})

		net.stop()
		u.networks = append(u.networks[:i], u.networks[i+1:]...)
		return nil
	}

	panic("tried deleting a non-existing network")
}
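// updatePassword sets the user's hashed password and persists it.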
func (u *user) updatePassword(hashed string) error {
	u.User.Password = hashed
	return u.srv.db.UpdatePassword(&u.User)
}