core/wayland: thread-safety meta fixes + cleanups + hypr workaround
- fork go-wayland/client and modify to make it thread-safe internally
- use sync.Map and atomic values in many places to cut down on mutex boilerplate
- do not create extworkspace client unless explicitly requested
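Every manager touched below swaps the same map[string]chan T plus sync.RWMutex pair for a single sync.Map. A minimal sketch of the resulting pattern, kept outside any particular manager; the State type, channel size, and method names are placeholders rather than code from this repository, but the Store / LoadAndDelete / Range calls mirror what the diff does throughout:

```go
package subscribers

import "sync"

// State stands in for the per-manager state types
// (BluetoothState, CUPSState, NetworkState, ...).
type State struct{}

type Manager struct {
	subscribers sync.Map // map[string]chan State, no separate mutex needed
}

// Subscribe registers a buffered channel under id.
func (m *Manager) Subscribe(id string) chan State {
	ch := make(chan State, 64)
	m.subscribers.Store(id, ch)
	return ch
}

// Unsubscribe removes the channel and closes it exactly once.
// LoadAndDelete is atomic, so concurrent calls cannot double-close.
func (m *Manager) Unsubscribe(id string) {
	if val, ok := m.subscribers.LoadAndDelete(id); ok {
		close(val.(chan State))
	}
}

// notify fans a snapshot out to every subscriber without blocking:
// a full channel simply drops the update.
func (m *Manager) notify(state State) {
	m.subscribers.Range(func(_, value interface{}) bool {
		select {
		case value.(chan State) <- state:
		default:
		}
		return true
	})
}
```

The non-blocking send matches the managers' existing behaviour of dropping an update when a subscriber's buffer is full rather than stalling the notifier goroutine.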
@@ -13,7 +13,6 @@ require (
 	github.com/holoplot/go-evdev v0.0.0-20250804134636-ab1d56a1fe83
 	github.com/spf13/cobra v1.10.1
 	github.com/stretchr/testify v1.11.1
-	github.com/yaslama/go-wayland/wayland v0.0.0-20250907155644-2874f32d9c34
 	golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6
 )
@@ -125,8 +125,6 @@ github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu
 github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
 github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
 github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
-github.com/yaslama/go-wayland/wayland v0.0.0-20250907155644-2874f32d9c34 h1:iTAt1me6SBYsuzrl/CmrxtATPlOG/pVviosM3DhUdKE=
-github.com/yaslama/go-wayland/wayland v0.0.0-20250907155644-2874f32d9c34/go.mod h1:jzmUN5lUAv2O8e63OvcauV4S30rIZ1BvF/PNYE37vDo=
 golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU=
 golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc=
 golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6 h1:zfMcR1Cs4KNuomFFgGefv5N0czO2XZpUbxGUy8i8ug0=
@@ -1,12 +1,12 @@
 // Generated by go-wayland-scanner
-// https://github.com/yaslama/go-wayland/cmd/go-wayland-scanner
+// https://github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/cmd/go-wayland-scanner
 // XML file : internal/proto/xml/dwl-ipc-unstable-v2.xml
 //
 // dwl_ipc_unstable_v2 Protocol Copyright:

 package dwl_ipc

-import "github.com/yaslama/go-wayland/wayland/client"
+import "github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/wayland/client"

 // ZdwlIpcManagerV2InterfaceName is the name of the interface as it appears in the [client.Registry].
 // It can be used to match the [client.RegistryGlobalEvent.Interface] in the
@@ -1,5 +1,5 @@
 // Generated by go-wayland-scanner
-// https://github.com/yaslama/go-wayland/cmd/go-wayland-scanner
+// https://github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/cmd/go-wayland-scanner
 // XML file : ext-workspace-v1.xml
 //
 // ext_workspace_v1 Protocol Copyright:
@@ -33,9 +33,10 @@ package ext_workspace

 import (
 	"reflect"
+	"sync"
 	"unsafe"

-	"github.com/yaslama/go-wayland/wayland/client"
+	"github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/wayland/client"
 )

 // registerServerProxy registers a proxy with a server-assigned ID.
@@ -61,8 +62,9 @@ func registerServerProxy(ctx *client.Context, proxy client.Proxy, serverID uint3
 		return
 	}

-	objectsMap := reflect.NewAt(objectsField.Type(), unsafe.Pointer(objectsField.UnsafeAddr())).Elem()
-	objectsMap.SetMapIndex(reflect.ValueOf(serverID), reflect.ValueOf(proxy))
+	objectsMapPtr := unsafe.Pointer(objectsField.UnsafeAddr())
+	objectsMap := (*sync.Map)(objectsMapPtr)
+	objectsMap.Store(serverID, proxy)
 }

 // ExtWorkspaceManagerV1InterfaceName is the name of the interface as it appears in the [client.Registry].
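The generated registerServerProxy helpers still reach into the forked client's unexported object registry through reflect and unsafe; what changes is the last step. Because the registry is now a sync.Map rather than a plain map, the field's address can be cast directly to *sync.Map and written with Store, instead of going through reflect.NewAt and SetMapIndex. A self-contained sketch of that technique using a stand-in struct — the fork's real client.Context layout and field name are not shown in this diff, so they are assumptions:

```go
package main

import (
	"fmt"
	"reflect"
	"sync"
	"unsafe"
)

// context stands in for the fork's client.Context; the unexported
// "objects" sync.Map field is an assumption mirroring what the
// generated registerServerProxy writes to.
type context struct {
	objects sync.Map // serverID -> proxy
}

// storeServerObject locates the unexported field with reflect, turns
// its address into a usable *sync.Map via unsafe, and calls Store.
func storeServerObject(ctx interface{}, serverID uint32, proxy interface{}) {
	v := reflect.ValueOf(ctx).Elem()
	field := v.FieldByName("objects")
	if !field.IsValid() {
		return
	}
	objects := (*sync.Map)(unsafe.Pointer(field.UnsafeAddr()))
	objects.Store(serverID, proxy)
}

func main() {
	ctx := &context{}
	storeServerObject(ctx, 7, "proxy-7")
	val, _ := ctx.objects.Load(uint32(7))
	fmt.Println(val) // proxy-7
}
```

The old path needed reflect.NewAt to obtain a writable reflect.Value before calling SetMapIndex; with a *sync.Map pointer in hand, plain method calls are enough.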
@@ -1,5 +1,5 @@
 // Generated by go-wayland-scanner
-// https://github.com/yaslama/go-wayland/cmd/go-wayland-scanner
+// https://github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/cmd/go-wayland-scanner
 // XML file : wayland-protocols/wlr-gamma-control-unstable-v1.xml
 //
 // wlr_gamma_control_unstable_v1 Protocol Copyright:
@@ -31,7 +31,7 @@
 package wlr_gamma_control

 import (
-	"github.com/yaslama/go-wayland/wayland/client"
+	"github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/wayland/client"
 	"golang.org/x/sys/unix"
 )
@@ -1,5 +1,5 @@
 // Generated by go-wayland-scanner
-// https://github.com/yaslama/go-wayland/cmd/go-wayland-scanner
+// https://github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/cmd/go-wayland-scanner
 // XML file : /home/brandon/repos/dankdots/wlr-output-management-unstable-v1.xml
 //
 // wlr_output_management_unstable_v1 Protocol Copyright:
@@ -31,9 +31,10 @@ package wlr_output_management

 import (
 	"reflect"
+	"sync"
 	"unsafe"

-	"github.com/yaslama/go-wayland/wayland/client"
+	"github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/wayland/client"
 )

 func registerServerProxy(ctx *client.Context, proxy client.Proxy, serverID uint32) {
@@ -47,9 +48,9 @@ func registerServerProxy(ctx *client.Context, proxy client.Proxy, serverID uint3
 	if !objectsField.IsValid() {
 		return
 	}
-	objectsField = reflect.NewAt(objectsField.Type(), unsafe.Pointer(objectsField.UnsafeAddr())).Elem()
-	objectsMap := objectsField.Interface().(map[uint32]client.Proxy)
-	objectsMap[serverID] = proxy
+	objectsMapPtr := unsafe.Pointer(objectsField.UnsafeAddr())
+	objectsMap := (*sync.Map)(objectsMapPtr)
+	objectsMap.Store(serverID, proxy)
 }

 // ZwlrOutputManagerV1InterfaceName is the name of the interface as it appears in the [client.Registry].
@@ -30,9 +30,8 @@ func NewManager() (*Manager, error) {
 			PairedDevices:    []Device{},
 			ConnectedDevices: []Device{},
 		},
-		stateMutex:  sync.RWMutex{},
-		subscribers: make(map[string]chan BluetoothState),
-		subMutex:    sync.RWMutex{},
+		stateMutex: sync.RWMutex{},
+
 		stopChan: make(chan struct{}),
 		dbusConn: conn,
 		signals:  make(chan *dbus.Signal, 256),
@@ -430,28 +429,21 @@ func (m *Manager) notifier() {
 			}
 			m.updateDevices()

-			m.subMutex.RLock()
-			if len(m.subscribers) == 0 {
-				m.subMutex.RUnlock()
-				pending = false
-				continue
-			}
-
 			currentState := m.snapshotState()

 			if m.lastNotifiedState != nil && !stateChanged(m.lastNotifiedState, &currentState) {
-				m.subMutex.RUnlock()
 				pending = false
 				continue
 			}

-			for _, ch := range m.subscribers {
+			m.subscribers.Range(func(key, value interface{}) bool {
+				ch := value.(chan BluetoothState)
 				select {
 				case ch <- currentState:
 				default:
 				}
-			}
-			m.subMutex.RUnlock()
+				return true
+			})

 			stateCopy := currentState
 			m.lastNotifiedState = &stateCopy
@@ -484,19 +476,14 @@ func (m *Manager) snapshotState() BluetoothState {

 func (m *Manager) Subscribe(id string) chan BluetoothState {
 	ch := make(chan BluetoothState, 64)
-	m.subMutex.Lock()
-	m.subscribers[id] = ch
-	m.subMutex.Unlock()
+	m.subscribers.Store(id, ch)
 	return ch
 }

 func (m *Manager) Unsubscribe(id string) {
-	m.subMutex.Lock()
-	if ch, ok := m.subscribers[id]; ok {
-		close(ch)
-		delete(m.subscribers, id)
+	if val, ok := m.subscribers.LoadAndDelete(id); ok {
+		close(val.(chan BluetoothState))
 	}
-	m.subMutex.Unlock()
 }

 func (m *Manager) SubscribePairing(id string) chan PairingPrompt {
@@ -618,12 +605,12 @@ func (m *Manager) Close() {
 		m.agent.Close()
 	}

-	m.subMutex.Lock()
-	for _, ch := range m.subscribers {
+	m.subscribers.Range(func(key, value interface{}) bool {
+		ch := value.(chan BluetoothState)
 		close(ch)
-	}
-	m.subscribers = make(map[string]chan BluetoothState)
-	m.subMutex.Unlock()
+		m.subscribers.Delete(key)
+		return true
+	})

 	m.pairingSubMutex.Lock()
 	for _, ch := range m.pairingSubscribers {
@@ -59,8 +59,7 @@ type PairingPrompt struct {
 type Manager struct {
 	state       *BluetoothState
 	stateMutex  sync.RWMutex
-	subscribers map[string]chan BluetoothState
-	subMutex    sync.RWMutex
+	subscribers sync.Map
 	stopChan    chan struct{}
 	dbusConn    *dbus.Conn
 	signals     chan *dbus.Signal
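The Close methods across these managers converge on the same teardown order once the registries are sync.Maps: stop the worker, wait for it, then walk the map once, closing and deleting every channel so nothing can send on a channel that has already been closed. A condensed sketch of that ordering; the Manager here is a stand-in, only the field shapes are taken from the diff:

```go
package shutdown

import "sync"

type Event struct{}

type Manager struct {
	subscribers sync.Map       // map[string]chan Event
	stopChan    chan struct{}  // closed to tell the notifier to exit
	notifierWg  sync.WaitGroup // tracks the notifier goroutine
}

// Close tears the manager down in three steps: signal, wait, drain.
func (m *Manager) Close() {
	// 1. Ask the notifier goroutine to stop.
	close(m.stopChan)

	// 2. Wait until it has returned, so no goroutine can still be
	//    ranging over the map and sending on subscriber channels.
	m.notifierWg.Wait()

	// 3. Close every remaining channel and drop it from the map.
	//    Deleting inside Range is explicitly allowed by sync.Map.
	m.subscribers.Range(func(key, value interface{}) bool {
		close(value.(chan Event))
		m.subscribers.Delete(key)
		return true
	})
}
```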
@@ -15,10 +15,8 @@ func NewManager() (*Manager, error) {
|
||||
|
||||
func NewManagerWithOptions(exponential bool) (*Manager, error) {
|
||||
m := &Manager{
|
||||
subscribers: make(map[string]chan State),
|
||||
updateSubscribers: make(map[string]chan DeviceUpdate),
|
||||
stopChan: make(chan struct{}),
|
||||
exponential: exponential,
|
||||
stopChan: make(chan struct{}),
|
||||
exponential: exponential,
|
||||
}
|
||||
|
||||
go m.initLogind()
|
||||
@@ -360,20 +358,14 @@ func (m *Manager) broadcastDeviceUpdate(deviceID string) {
|
||||
|
||||
update := DeviceUpdate{Device: *targetDevice}
|
||||
|
||||
m.subMutex.RLock()
|
||||
defer m.subMutex.RUnlock()
|
||||
|
||||
if len(m.updateSubscribers) == 0 {
|
||||
log.Debugf("No update subscribers for device: %s", deviceID)
|
||||
return
|
||||
}
|
||||
|
||||
log.Debugf("Broadcasting device update: %s at %d%%", deviceID, targetDevice.CurrentPercent)
|
||||
|
||||
for _, ch := range m.updateSubscribers {
|
||||
m.updateSubscribers.Range(func(key, value interface{}) bool {
|
||||
ch := value.(chan DeviceUpdate)
|
||||
select {
|
||||
case ch <- update:
|
||||
default:
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
@@ -41,13 +41,11 @@ func TestManager_SetBrightness_LogindSuccess(t *testing.T) {
|
||||
}
|
||||
|
||||
m := &Manager{
|
||||
logindBackend: mockLogind,
|
||||
sysfsBackend: sysfs,
|
||||
logindReady: true,
|
||||
sysfsReady: true,
|
||||
subscribers: make(map[string]chan State),
|
||||
updateSubscribers: make(map[string]chan DeviceUpdate),
|
||||
stopChan: make(chan struct{}),
|
||||
logindBackend: mockLogind,
|
||||
sysfsBackend: sysfs,
|
||||
logindReady: true,
|
||||
sysfsReady: true,
|
||||
stopChan: make(chan struct{}),
|
||||
}
|
||||
|
||||
m.state = State{
|
||||
@@ -115,13 +113,11 @@ func TestManager_SetBrightness_LogindFailsFallbackToSysfs(t *testing.T) {
|
||||
}
|
||||
|
||||
m := &Manager{
|
||||
logindBackend: mockLogind,
|
||||
sysfsBackend: sysfs,
|
||||
logindReady: true,
|
||||
sysfsReady: true,
|
||||
subscribers: make(map[string]chan State),
|
||||
updateSubscribers: make(map[string]chan DeviceUpdate),
|
||||
stopChan: make(chan struct{}),
|
||||
logindBackend: mockLogind,
|
||||
sysfsBackend: sysfs,
|
||||
logindReady: true,
|
||||
sysfsReady: true,
|
||||
stopChan: make(chan struct{}),
|
||||
}
|
||||
|
||||
m.state = State{
|
||||
@@ -185,13 +181,11 @@ func TestManager_SetBrightness_NoLogind(t *testing.T) {
|
||||
}
|
||||
|
||||
m := &Manager{
|
||||
logindBackend: nil,
|
||||
sysfsBackend: sysfs,
|
||||
logindReady: false,
|
||||
sysfsReady: true,
|
||||
subscribers: make(map[string]chan State),
|
||||
updateSubscribers: make(map[string]chan DeviceUpdate),
|
||||
stopChan: make(chan struct{}),
|
||||
logindBackend: nil,
|
||||
sysfsBackend: sysfs,
|
||||
logindReady: false,
|
||||
sysfsReady: true,
|
||||
stopChan: make(chan struct{}),
|
||||
}
|
||||
|
||||
m.state = State{
|
||||
@@ -250,13 +244,11 @@ func TestManager_SetBrightness_LEDWithLogind(t *testing.T) {
|
||||
}
|
||||
|
||||
m := &Manager{
|
||||
logindBackend: mockLogind,
|
||||
sysfsBackend: sysfs,
|
||||
logindReady: true,
|
||||
sysfsReady: true,
|
||||
subscribers: make(map[string]chan State),
|
||||
updateSubscribers: make(map[string]chan DeviceUpdate),
|
||||
stopChan: make(chan struct{}),
|
||||
logindBackend: mockLogind,
|
||||
sysfsBackend: sysfs,
|
||||
logindReady: true,
|
||||
sysfsReady: true,
|
||||
stopChan: make(chan struct{}),
|
||||
}
|
||||
|
||||
m.state = State{
|
||||
|
||||
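One reason the test fixtures above shrink: the zero value of sync.Map is an empty, ready-to-use map, so the subscribers: make(map[string]chan State) lines simply disappear from the struct literals. A small illustration with stand-in types (not the repository's):

```go
package main

import (
	"fmt"
	"sync"
)

type State struct{}

// Manager needs no constructor line for subscribers: a zero sync.Map
// accepts Store immediately, unlike a nil map[string]chan State,
// which would panic on assignment.
type Manager struct {
	subscribers sync.Map
	stopChan    chan struct{}
}

func main() {
	m := &Manager{stopChan: make(chan struct{})}
	m.subscribers.Store("client", make(chan State, 1))

	n := 0
	m.subscribers.Range(func(_, _ interface{}) bool { n++; return true })
	fmt.Println(n) // 1
}
```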
@@ -51,9 +51,8 @@ type Manager struct {
|
||||
stateMutex sync.RWMutex
|
||||
state State
|
||||
|
||||
subscribers map[string]chan State
|
||||
updateSubscribers map[string]chan DeviceUpdate
|
||||
subMutex sync.RWMutex
|
||||
subscribers sync.Map
|
||||
updateSubscribers sync.Map
|
||||
|
||||
broadcastMutex sync.Mutex
|
||||
broadcastTimer *time.Timer
|
||||
@@ -121,36 +120,31 @@ type SetBrightnessParams struct {
|
||||
|
||||
func (m *Manager) Subscribe(id string) chan State {
|
||||
ch := make(chan State, 16)
|
||||
m.subMutex.Lock()
|
||||
m.subscribers[id] = ch
|
||||
m.subMutex.Unlock()
|
||||
|
||||
m.subscribers.Store(id, ch)
|
||||
|
||||
return ch
|
||||
}
|
||||
|
||||
func (m *Manager) Unsubscribe(id string) {
|
||||
m.subMutex.Lock()
|
||||
if ch, ok := m.subscribers[id]; ok {
|
||||
close(ch)
|
||||
delete(m.subscribers, id)
|
||||
|
||||
if val, ok := m.subscribers.LoadAndDelete(id); ok {
|
||||
close(val.(chan State))
|
||||
|
||||
}
|
||||
m.subMutex.Unlock()
|
||||
|
||||
}
|
||||
|
||||
func (m *Manager) SubscribeUpdates(id string) chan DeviceUpdate {
|
||||
ch := make(chan DeviceUpdate, 16)
|
||||
m.subMutex.Lock()
|
||||
m.updateSubscribers[id] = ch
|
||||
m.subMutex.Unlock()
|
||||
m.updateSubscribers.Store(id, ch)
|
||||
return ch
|
||||
}
|
||||
|
||||
func (m *Manager) UnsubscribeUpdates(id string) {
|
||||
m.subMutex.Lock()
|
||||
if ch, ok := m.updateSubscribers[id]; ok {
|
||||
close(ch)
|
||||
delete(m.updateSubscribers, id)
|
||||
if val, ok := m.updateSubscribers.LoadAndDelete(id); ok {
|
||||
close(val.(chan DeviceUpdate))
|
||||
}
|
||||
m.subMutex.Unlock()
|
||||
}
|
||||
|
||||
func (m *Manager) NotifySubscribers() {
|
||||
@@ -158,15 +152,14 @@ func (m *Manager) NotifySubscribers() {
|
||||
state := m.state
|
||||
m.stateMutex.RUnlock()
|
||||
|
||||
m.subMutex.RLock()
|
||||
defer m.subMutex.RUnlock()
|
||||
|
||||
for _, ch := range m.subscribers {
|
||||
m.subscribers.Range(func(key, value interface{}) bool {
|
||||
ch := value.(chan State)
|
||||
select {
|
||||
case ch <- state:
|
||||
default:
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
func (m *Manager) GetState() State {
|
||||
@@ -178,16 +171,18 @@ func (m *Manager) GetState() State {
|
||||
func (m *Manager) Close() {
|
||||
close(m.stopChan)
|
||||
|
||||
m.subMutex.Lock()
|
||||
for _, ch := range m.subscribers {
|
||||
m.subscribers.Range(func(key, value interface{}) bool {
|
||||
ch := value.(chan State)
|
||||
close(ch)
|
||||
}
|
||||
m.subscribers = make(map[string]chan State)
|
||||
for _, ch := range m.updateSubscribers {
|
||||
m.subscribers.Delete(key)
|
||||
return true
|
||||
})
|
||||
m.updateSubscribers.Range(func(key, value interface{}) bool {
|
||||
ch := value.(chan DeviceUpdate)
|
||||
close(ch)
|
||||
}
|
||||
m.updateSubscribers = make(map[string]chan DeviceUpdate)
|
||||
m.subMutex.Unlock()
|
||||
m.updateSubscribers.Delete(key)
|
||||
return true
|
||||
})
|
||||
|
||||
if m.logindBackend != nil {
|
||||
m.logindBackend.Close()
|
||||
|
||||
@@ -35,13 +35,11 @@ func NewManager() (*Manager, error) {
|
||||
state: &CUPSState{
|
||||
Printers: make(map[string]*Printer),
|
||||
},
|
||||
client: client,
|
||||
baseURL: baseURL,
|
||||
stateMutex: sync.RWMutex{},
|
||||
stopChan: make(chan struct{}),
|
||||
dirty: make(chan struct{}, 1),
|
||||
subscribers: make(map[string]chan CUPSState),
|
||||
subMutex: sync.RWMutex{},
|
||||
client: client,
|
||||
baseURL: baseURL,
|
||||
stateMutex: sync.RWMutex{},
|
||||
stopChan: make(chan struct{}),
|
||||
dirty: make(chan struct{}, 1),
|
||||
}
|
||||
|
||||
if err := m.updateState(); err != nil {
|
||||
@@ -142,28 +140,22 @@ func (m *Manager) notifier() {
|
||||
if !pending {
|
||||
continue
|
||||
}
|
||||
m.subMutex.RLock()
|
||||
if len(m.subscribers) == 0 {
|
||||
m.subMutex.RUnlock()
|
||||
pending = false
|
||||
continue
|
||||
}
|
||||
|
||||
currentState := m.snapshotState()
|
||||
|
||||
if m.lastNotifiedState != nil && !stateChanged(m.lastNotifiedState, ¤tState) {
|
||||
m.subMutex.RUnlock()
|
||||
pending = false
|
||||
continue
|
||||
}
|
||||
|
||||
for _, ch := range m.subscribers {
|
||||
m.subscribers.Range(func(key, value interface{}) bool {
|
||||
ch := value.(chan CUPSState)
|
||||
select {
|
||||
case ch <- currentState:
|
||||
default:
|
||||
}
|
||||
}
|
||||
m.subMutex.RUnlock()
|
||||
return true
|
||||
})
|
||||
|
||||
stateCopy := currentState
|
||||
m.lastNotifiedState = &stateCopy
|
||||
@@ -199,10 +191,14 @@ func (m *Manager) snapshotState() CUPSState {

 func (m *Manager) Subscribe(id string) chan CUPSState {
 	ch := make(chan CUPSState, 64)
-	m.subMutex.Lock()
-	wasEmpty := len(m.subscribers) == 0
-	m.subscribers[id] = ch
-	m.subMutex.Unlock()
+
+	wasEmpty := true
+	m.subscribers.Range(func(key, value interface{}) bool {
+		wasEmpty = false
+		return false
+	})
+
+	m.subscribers.Store(id, ch)

 	if wasEmpty && m.subscription != nil {
 		if err := m.subscription.Start(); err != nil {
@@ -217,13 +213,15 @@ func (m *Manager) Subscribe(id string) chan CUPSState {
 }

 func (m *Manager) Unsubscribe(id string) {
-	m.subMutex.Lock()
-	if ch, ok := m.subscribers[id]; ok {
-		close(ch)
-		delete(m.subscribers, id)
+	if val, ok := m.subscribers.LoadAndDelete(id); ok {
+		close(val.(chan CUPSState))
 	}
-	isEmpty := len(m.subscribers) == 0
-	m.subMutex.Unlock()
+
+	isEmpty := true
+	m.subscribers.Range(func(key, value interface{}) bool {
+		isEmpty = false
+		return false
+	})

 	if isEmpty && m.subscription != nil {
 		m.subscription.Stop()
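sync.Map has no length method, which is why the CUPS manager now derives wasEmpty and isEmpty by ranging and bailing out at the first entry, and why the updated tests count subscribers with a Range loop instead of asserting on len(). Two small helpers make the idiom explicit; the names are mine, not the repository's:

```go
package syncmaputil

import "sync"

// isEmpty reports whether the map has no entries. Range stops as soon
// as the callback returns false, so this touches at most one entry.
func isEmpty(m *sync.Map) bool {
	empty := true
	m.Range(func(_, _ interface{}) bool {
		empty = false
		return false
	})
	return empty
}

// count walks every entry; there is no cheaper way to get a length
// out of a sync.Map, hence the Range-based assertions in the tests.
func count(m *sync.Map) int {
	n := 0
	m.Range(func(_, _ interface{}) bool {
		n++
		return true
	})
	return n
}
```

Both helpers observe a live map, so the result is only a snapshot; a subscriber appearing mid-check at worst delays the CUPS event subscription start or stop until the next state change.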
@@ -241,12 +239,12 @@ func (m *Manager) Close() {
|
||||
m.eventWG.Wait()
|
||||
m.notifierWg.Wait()
|
||||
|
||||
m.subMutex.Lock()
|
||||
for _, ch := range m.subscribers {
|
||||
m.subscribers.Range(func(key, value interface{}) bool {
|
||||
ch := value.(chan CUPSState)
|
||||
close(ch)
|
||||
}
|
||||
m.subscribers = make(map[string]chan CUPSState)
|
||||
m.subMutex.Unlock()
|
||||
m.subscribers.Delete(key)
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
func stateChanged(old, new *CUPSState) bool {
|
||||
|
||||
@@ -13,10 +13,9 @@ func TestNewManager(t *testing.T) {
|
||||
state: &CUPSState{
|
||||
Printers: make(map[string]*Printer),
|
||||
},
|
||||
client: nil,
|
||||
stopChan: make(chan struct{}),
|
||||
dirty: make(chan struct{}, 1),
|
||||
subscribers: make(map[string]chan CUPSState),
|
||||
client: nil,
|
||||
stopChan: make(chan struct{}),
|
||||
dirty: make(chan struct{}, 1),
|
||||
}
|
||||
|
||||
assert.NotNil(t, m)
|
||||
@@ -35,10 +34,9 @@ func TestManager_GetState(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
client: mockClient,
|
||||
stopChan: make(chan struct{}),
|
||||
dirty: make(chan struct{}, 1),
|
||||
subscribers: make(map[string]chan CUPSState),
|
||||
client: mockClient,
|
||||
stopChan: make(chan struct{}),
|
||||
dirty: make(chan struct{}, 1),
|
||||
}
|
||||
|
||||
state := m.GetState()
|
||||
@@ -53,18 +51,28 @@ func TestManager_Subscribe(t *testing.T) {
|
||||
state: &CUPSState{
|
||||
Printers: make(map[string]*Printer),
|
||||
},
|
||||
client: mockClient,
|
||||
stopChan: make(chan struct{}),
|
||||
dirty: make(chan struct{}, 1),
|
||||
subscribers: make(map[string]chan CUPSState),
|
||||
client: mockClient,
|
||||
stopChan: make(chan struct{}),
|
||||
dirty: make(chan struct{}, 1),
|
||||
}
|
||||
|
||||
ch := m.Subscribe("test-client")
|
||||
assert.NotNil(t, ch)
|
||||
assert.Equal(t, 1, len(m.subscribers))
|
||||
|
||||
count := 0
|
||||
m.subscribers.Range(func(key, value interface{}) bool {
|
||||
count++
|
||||
return true
|
||||
})
|
||||
assert.Equal(t, 1, count)
|
||||
|
||||
m.Unsubscribe("test-client")
|
||||
assert.Equal(t, 0, len(m.subscribers))
|
||||
count = 0
|
||||
m.subscribers.Range(func(key, value interface{}) bool {
|
||||
count++
|
||||
return true
|
||||
})
|
||||
assert.Equal(t, 0, count)
|
||||
}
|
||||
|
||||
func TestManager_Close(t *testing.T) {
|
||||
@@ -74,10 +82,9 @@ func TestManager_Close(t *testing.T) {
|
||||
state: &CUPSState{
|
||||
Printers: make(map[string]*Printer),
|
||||
},
|
||||
client: mockClient,
|
||||
stopChan: make(chan struct{}),
|
||||
dirty: make(chan struct{}, 1),
|
||||
subscribers: make(map[string]chan CUPSState),
|
||||
client: mockClient,
|
||||
stopChan: make(chan struct{}),
|
||||
dirty: make(chan struct{}, 1),
|
||||
}
|
||||
|
||||
m.eventWG.Add(1)
|
||||
@@ -93,7 +100,12 @@ func TestManager_Close(t *testing.T) {
|
||||
}()
|
||||
|
||||
m.Close()
|
||||
assert.Equal(t, 0, len(m.subscribers))
|
||||
count := 0
|
||||
m.subscribers.Range(func(key, value interface{}) bool {
|
||||
count++
|
||||
return true
|
||||
})
|
||||
assert.Equal(t, 0, count)
|
||||
}
|
||||
|
||||
func TestStateChanged(t *testing.T) {
|
||||
|
||||
@@ -39,8 +39,7 @@ type Manager struct {
|
||||
client CUPSClientInterface
|
||||
subscription SubscriptionManagerInterface
|
||||
stateMutex sync.RWMutex
|
||||
subscribers map[string]chan CUPSState
|
||||
subMutex sync.RWMutex
|
||||
subscribers sync.Map
|
||||
stopChan chan struct{}
|
||||
eventWG sync.WaitGroup
|
||||
dirty chan struct{}
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
wlclient "github.com/yaslama/go-wayland/wayland/client"
|
||||
wlclient "github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/wayland/client"
|
||||
|
||||
"github.com/AvengeMedia/DankMaterialShell/core/internal/log"
|
||||
"github.com/AvengeMedia/DankMaterialShell/core/internal/proto/dwl_ipc"
|
||||
@@ -18,9 +18,9 @@ func NewManager(display *wlclient.Display) (*Manager, error) {
|
||||
cmdq: make(chan cmd, 128),
|
||||
outputSetupReq: make(chan uint32, 16),
|
||||
stopChan: make(chan struct{}),
|
||||
subscribers: make(map[string]chan State),
|
||||
dirty: make(chan struct{}, 1),
|
||||
layouts: make([]string, 0),
|
||||
|
||||
dirty: make(chan struct{}, 1),
|
||||
layouts: make([]string, 0),
|
||||
}
|
||||
|
||||
if err := m.setupRegistry(); err != nil {
|
||||
@@ -365,14 +365,6 @@ func (m *Manager) notifier() {
|
||||
if !pending {
|
||||
continue
|
||||
}
|
||||
m.subMutex.RLock()
|
||||
subCount := len(m.subscribers)
|
||||
m.subMutex.RUnlock()
|
||||
|
||||
if subCount == 0 {
|
||||
pending = false
|
||||
continue
|
||||
}
|
||||
|
||||
currentState := m.GetState()
|
||||
|
||||
@@ -381,15 +373,15 @@ func (m *Manager) notifier() {
|
||||
continue
|
||||
}
|
||||
|
||||
m.subMutex.RLock()
|
||||
for _, ch := range m.subscribers {
|
||||
m.subscribers.Range(func(key, value interface{}) bool {
|
||||
ch := value.(chan State)
|
||||
select {
|
||||
case ch <- currentState:
|
||||
default:
|
||||
log.Warn("DWL: subscriber channel full, dropping update")
|
||||
}
|
||||
}
|
||||
m.subMutex.RUnlock()
|
||||
return true
|
||||
})
|
||||
|
||||
stateCopy := currentState
|
||||
m.lastNotified = &stateCopy
|
||||
@@ -518,12 +510,12 @@ func (m *Manager) Close() {
|
||||
m.wg.Wait()
|
||||
m.notifierWg.Wait()
|
||||
|
||||
m.subMutex.Lock()
|
||||
for _, ch := range m.subscribers {
|
||||
m.subscribers.Range(func(key, value interface{}) bool {
|
||||
ch := value.(chan State)
|
||||
close(ch)
|
||||
}
|
||||
m.subscribers = make(map[string]chan State)
|
||||
m.subMutex.Unlock()
|
||||
m.subscribers.Delete(key)
|
||||
return true
|
||||
})
|
||||
|
||||
m.outputsMutex.Lock()
|
||||
for _, out := range m.outputs {
|
||||
|
||||
@@ -3,7 +3,7 @@ package dwl
|
||||
import (
|
||||
"sync"
|
||||
|
||||
wlclient "github.com/yaslama/go-wayland/wayland/client"
|
||||
wlclient "github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/wayland/client"
|
||||
)
|
||||
|
||||
type TagState struct {
|
||||
@@ -52,8 +52,7 @@ type Manager struct {
|
||||
stopChan chan struct{}
|
||||
wg sync.WaitGroup
|
||||
|
||||
subscribers map[string]chan State
|
||||
subMutex sync.RWMutex
|
||||
subscribers sync.Map
|
||||
dirty chan struct{}
|
||||
notifierWg sync.WaitGroup
|
||||
lastNotified *State
|
||||
@@ -92,19 +91,19 @@ func (m *Manager) GetState() State {
|
||||
|
||||
func (m *Manager) Subscribe(id string) chan State {
|
||||
ch := make(chan State, 64)
|
||||
m.subMutex.Lock()
|
||||
m.subscribers[id] = ch
|
||||
m.subMutex.Unlock()
|
||||
|
||||
m.subscribers.Store(id, ch)
|
||||
|
||||
return ch
|
||||
}
|
||||
|
||||
func (m *Manager) Unsubscribe(id string) {
|
||||
m.subMutex.Lock()
|
||||
if ch, ok := m.subscribers[id]; ok {
|
||||
close(ch)
|
||||
delete(m.subscribers, id)
|
||||
|
||||
if val, ok := m.subscribers.LoadAndDelete(id); ok {
|
||||
close(val.(chan State))
|
||||
|
||||
}
|
||||
m.subMutex.Unlock()
|
||||
|
||||
}
|
||||
|
||||
func (m *Manager) notifySubscribers() {
|
||||
|
||||
@@ -47,10 +47,9 @@ func TestHandleRequest(t *testing.T) {
|
||||
mockDevice.EXPECT().ReadOne().Return(nil, errors.New("test")).Maybe()
|
||||
|
||||
m := &Manager{
|
||||
devices: []EvdevDevice{mockDevice},
|
||||
state: State{Available: true, CapsLock: true},
|
||||
subscribers: make(map[string]chan State),
|
||||
closeChan: make(chan struct{}),
|
||||
devices: []EvdevDevice{mockDevice},
|
||||
state: State{Available: true, CapsLock: true},
|
||||
closeChan: make(chan struct{}),
|
||||
}
|
||||
|
||||
conn := newMockNetConn()
|
||||
@@ -77,10 +76,9 @@ func TestHandleRequest(t *testing.T) {
|
||||
mockDevice.EXPECT().ReadOne().Return(nil, errors.New("test")).Maybe()
|
||||
|
||||
m := &Manager{
|
||||
devices: []EvdevDevice{mockDevice},
|
||||
state: State{Available: true, CapsLock: false},
|
||||
subscribers: make(map[string]chan State),
|
||||
closeChan: make(chan struct{}),
|
||||
devices: []EvdevDevice{mockDevice},
|
||||
state: State{Available: true, CapsLock: false},
|
||||
closeChan: make(chan struct{}),
|
||||
}
|
||||
|
||||
conn := newMockNetConn()
|
||||
@@ -107,10 +105,9 @@ func TestHandleGetState(t *testing.T) {
|
||||
mockDevice.EXPECT().ReadOne().Return(nil, errors.New("test")).Maybe()
|
||||
|
||||
m := &Manager{
|
||||
devices: []EvdevDevice{mockDevice},
|
||||
state: State{Available: true, CapsLock: false},
|
||||
subscribers: make(map[string]chan State),
|
||||
closeChan: make(chan struct{}),
|
||||
devices: []EvdevDevice{mockDevice},
|
||||
state: State{Available: true, CapsLock: false},
|
||||
closeChan: make(chan struct{}),
|
||||
}
|
||||
|
||||
conn := newMockNetConn()
|
||||
|
||||
@@ -35,8 +35,7 @@ type Manager struct {
|
||||
monitoredPaths map[string]bool
|
||||
state State
|
||||
stateMutex sync.RWMutex
|
||||
subscribers map[string]chan State
|
||||
subMutex sync.RWMutex
|
||||
subscribers sync.Map
|
||||
closeChan chan struct{}
|
||||
closeOnce sync.Once
|
||||
watcher *fsnotify.Watcher
|
||||
@@ -69,9 +68,9 @@ func NewManager() (*Manager, error) {
|
||||
devices: devices,
|
||||
monitoredPaths: monitoredPaths,
|
||||
state: State{Available: true, CapsLock: initialCapsLock},
|
||||
subscribers: make(map[string]chan State),
|
||||
closeChan: make(chan struct{}),
|
||||
watcher: watcher,
|
||||
|
||||
closeChan: make(chan struct{}),
|
||||
watcher: watcher,
|
||||
}
|
||||
|
||||
for i, device := range devices {
|
||||
@@ -332,37 +331,26 @@ func (m *Manager) GetState() State {
|
||||
}
|
||||
|
||||
func (m *Manager) Subscribe(id string) chan State {
|
||||
m.subMutex.Lock()
|
||||
defer m.subMutex.Unlock()
|
||||
|
||||
ch := make(chan State, 16)
|
||||
m.subscribers[id] = ch
|
||||
m.subscribers.Store(id, ch)
|
||||
return ch
|
||||
}
|
||||
|
||||
func (m *Manager) Unsubscribe(id string) {
|
||||
m.subMutex.Lock()
|
||||
defer m.subMutex.Unlock()
|
||||
|
||||
ch, ok := m.subscribers[id]
|
||||
if !ok {
|
||||
return
|
||||
if val, ok := m.subscribers.LoadAndDelete(id); ok {
|
||||
close(val.(chan State))
|
||||
}
|
||||
|
||||
close(ch)
|
||||
delete(m.subscribers, id)
|
||||
}
|
||||
|
||||
func (m *Manager) notifySubscribers(state State) {
|
||||
m.subMutex.RLock()
|
||||
defer m.subMutex.RUnlock()
|
||||
|
||||
for _, ch := range m.subscribers {
|
||||
m.subscribers.Range(func(key, value interface{}) bool {
|
||||
ch := value.(chan State)
|
||||
select {
|
||||
case ch <- state:
|
||||
default:
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
func (m *Manager) Close() {
|
||||
@@ -384,12 +372,12 @@ func (m *Manager) Close() {
|
||||
}
|
||||
m.devicesMutex.Unlock()
|
||||
|
||||
m.subMutex.Lock()
|
||||
for id, ch := range m.subscribers {
|
||||
m.subscribers.Range(func(key, value interface{}) bool {
|
||||
ch := value.(chan State)
|
||||
close(ch)
|
||||
delete(m.subscribers, id)
|
||||
}
|
||||
m.subMutex.Unlock()
|
||||
m.subscribers.Delete(key)
|
||||
return true
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -16,10 +16,9 @@ func TestManager_Creation(t *testing.T) {
|
||||
mockDevice.EXPECT().ReadOne().Return(nil, errors.New("test")).Maybe()
|
||||
|
||||
m := &Manager{
|
||||
devices: []EvdevDevice{mockDevice},
|
||||
state: State{Available: true, CapsLock: false},
|
||||
subscribers: make(map[string]chan State),
|
||||
closeChan: make(chan struct{}),
|
||||
devices: []EvdevDevice{mockDevice},
|
||||
state: State{Available: true, CapsLock: false},
|
||||
closeChan: make(chan struct{}),
|
||||
}
|
||||
|
||||
assert.NotNil(t, m)
|
||||
@@ -32,10 +31,9 @@ func TestManager_Creation(t *testing.T) {
|
||||
mockDevice.EXPECT().ReadOne().Return(nil, errors.New("test")).Maybe()
|
||||
|
||||
m := &Manager{
|
||||
devices: []EvdevDevice{mockDevice},
|
||||
state: State{Available: true, CapsLock: true},
|
||||
subscribers: make(map[string]chan State),
|
||||
closeChan: make(chan struct{}),
|
||||
devices: []EvdevDevice{mockDevice},
|
||||
state: State{Available: true, CapsLock: true},
|
||||
closeChan: make(chan struct{}),
|
||||
}
|
||||
|
||||
assert.NotNil(t, m)
|
||||
@@ -52,7 +50,6 @@ func TestManager_GetState(t *testing.T) {
|
||||
devices: []EvdevDevice{mockDevice},
|
||||
monitoredPaths: make(map[string]bool),
|
||||
state: State{Available: true, CapsLock: false},
|
||||
subscribers: make(map[string]chan State),
|
||||
closeChan: make(chan struct{}),
|
||||
}
|
||||
|
||||
@@ -69,13 +66,17 @@ func TestManager_Subscribe(t *testing.T) {
|
||||
devices: []EvdevDevice{mockDevice},
|
||||
monitoredPaths: make(map[string]bool),
|
||||
state: State{Available: true, CapsLock: false},
|
||||
subscribers: make(map[string]chan State),
|
||||
closeChan: make(chan struct{}),
|
||||
}
|
||||
|
||||
ch := m.Subscribe("test-client")
|
||||
assert.NotNil(t, ch)
|
||||
assert.Len(t, m.subscribers, 1)
|
||||
count := 0
|
||||
m.subscribers.Range(func(key, value interface{}) bool {
|
||||
count++
|
||||
return true
|
||||
})
|
||||
assert.Equal(t, 1, count)
|
||||
}
|
||||
|
||||
func TestManager_Unsubscribe(t *testing.T) {
|
||||
@@ -86,15 +87,24 @@ func TestManager_Unsubscribe(t *testing.T) {
|
||||
devices: []EvdevDevice{mockDevice},
|
||||
monitoredPaths: make(map[string]bool),
|
||||
state: State{Available: true, CapsLock: false},
|
||||
subscribers: make(map[string]chan State),
|
||||
closeChan: make(chan struct{}),
|
||||
}
|
||||
|
||||
ch := m.Subscribe("test-client")
|
||||
assert.Len(t, m.subscribers, 1)
|
||||
count := 0
|
||||
m.subscribers.Range(func(key, value interface{}) bool {
|
||||
count++
|
||||
return true
|
||||
})
|
||||
assert.Equal(t, 1, count)
|
||||
|
||||
m.Unsubscribe("test-client")
|
||||
assert.Len(t, m.subscribers, 0)
|
||||
count = 0
|
||||
m.subscribers.Range(func(key, value interface{}) bool {
|
||||
count++
|
||||
return true
|
||||
})
|
||||
assert.Equal(t, 0, count)
|
||||
|
||||
select {
|
||||
case _, ok := <-ch:
|
||||
@@ -112,7 +122,6 @@ func TestManager_UpdateCapsLock(t *testing.T) {
|
||||
devices: []EvdevDevice{mockDevice},
|
||||
monitoredPaths: make(map[string]bool),
|
||||
state: State{Available: true, CapsLock: false},
|
||||
subscribers: make(map[string]chan State),
|
||||
closeChan: make(chan struct{}),
|
||||
}
|
||||
|
||||
@@ -148,7 +157,6 @@ func TestManager_Close(t *testing.T) {
|
||||
devices: []EvdevDevice{mockDevice},
|
||||
monitoredPaths: make(map[string]bool),
|
||||
state: State{Available: true, CapsLock: false},
|
||||
subscribers: make(map[string]chan State),
|
||||
closeChan: make(chan struct{}),
|
||||
}
|
||||
|
||||
@@ -171,7 +179,12 @@ func TestManager_Close(t *testing.T) {
|
||||
t.Error("channel 2 should be closed")
|
||||
}
|
||||
|
||||
assert.Len(t, m.subscribers, 0)
|
||||
count := 0
|
||||
m.subscribers.Range(func(key, value interface{}) bool {
|
||||
count++
|
||||
return true
|
||||
})
|
||||
assert.Equal(t, 0, count)
|
||||
|
||||
m.Close()
|
||||
}
|
||||
@@ -230,10 +243,9 @@ func TestManager_MonitorDevice(t *testing.T) {
|
||||
mockDevice.EXPECT().Close().Return(nil).Maybe()
|
||||
|
||||
m := &Manager{
|
||||
devices: []EvdevDevice{mockDevice},
|
||||
state: State{Available: true, CapsLock: false},
|
||||
subscribers: make(map[string]chan State),
|
||||
closeChan: make(chan struct{}),
|
||||
devices: []EvdevDevice{mockDevice},
|
||||
state: State{Available: true, CapsLock: false},
|
||||
closeChan: make(chan struct{}),
|
||||
}
|
||||
|
||||
ch := m.Subscribe("test")
|
||||
@@ -276,7 +288,6 @@ func TestNotifySubscribers(t *testing.T) {
|
||||
devices: []EvdevDevice{mockDevice},
|
||||
monitoredPaths: make(map[string]bool),
|
||||
state: State{Available: true, CapsLock: false},
|
||||
subscribers: make(map[string]chan State),
|
||||
closeChan: make(chan struct{}),
|
||||
}
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
|
||||
"github.com/AvengeMedia/DankMaterialShell/core/internal/log"
|
||||
"github.com/AvengeMedia/DankMaterialShell/core/internal/proto/ext_workspace"
|
||||
wlclient "github.com/yaslama/go-wayland/wayland/client"
|
||||
wlclient "github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/wayland/client"
|
||||
)
|
||||
|
||||
func NewManager(display *wlclient.Display) (*Manager, error) {
|
||||
@@ -19,8 +19,8 @@ func NewManager(display *wlclient.Display) (*Manager, error) {
|
||||
workspaces: make(map[uint32]*workspaceState),
|
||||
cmdq: make(chan cmd, 128),
|
||||
stopChan: make(chan struct{}),
|
||||
subscribers: make(map[string]chan State),
|
||||
dirty: make(chan struct{}, 1),
|
||||
|
||||
dirty: make(chan struct{}, 1),
|
||||
}
|
||||
|
||||
m.wg.Add(1)
|
||||
@@ -389,14 +389,6 @@ func (m *Manager) notifier() {
|
||||
if !pending {
|
||||
continue
|
||||
}
|
||||
m.subMutex.RLock()
|
||||
subCount := len(m.subscribers)
|
||||
m.subMutex.RUnlock()
|
||||
|
||||
if subCount == 0 {
|
||||
pending = false
|
||||
continue
|
||||
}
|
||||
|
||||
currentState := m.GetState()
|
||||
|
||||
@@ -405,15 +397,15 @@ func (m *Manager) notifier() {
|
||||
continue
|
||||
}
|
||||
|
||||
m.subMutex.RLock()
|
||||
for _, ch := range m.subscribers {
|
||||
m.subscribers.Range(func(key, value interface{}) bool {
|
||||
ch := value.(chan State)
|
||||
select {
|
||||
case ch <- currentState:
|
||||
default:
|
||||
log.Warn("ExtWorkspace: subscriber channel full, dropping update")
|
||||
}
|
||||
}
|
||||
m.subMutex.RUnlock()
|
||||
return true
|
||||
})
|
||||
|
||||
stateCopy := currentState
|
||||
m.lastNotified = &stateCopy
|
||||
@@ -564,12 +556,12 @@ func (m *Manager) Close() {
|
||||
m.wg.Wait()
|
||||
m.notifierWg.Wait()
|
||||
|
||||
m.subMutex.Lock()
|
||||
for _, ch := range m.subscribers {
|
||||
m.subscribers.Range(func(key, value interface{}) bool {
|
||||
ch := value.(chan State)
|
||||
close(ch)
|
||||
}
|
||||
m.subscribers = make(map[string]chan State)
|
||||
m.subMutex.Unlock()
|
||||
m.subscribers.Delete(key)
|
||||
return true
|
||||
})
|
||||
|
||||
m.workspacesMutex.Lock()
|
||||
for _, ws := range m.workspaces {
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"sync"
|
||||
|
||||
"github.com/AvengeMedia/DankMaterialShell/core/internal/proto/ext_workspace"
|
||||
wlclient "github.com/yaslama/go-wayland/wayland/client"
|
||||
wlclient "github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/wayland/client"
|
||||
)
|
||||
|
||||
type Workspace struct {
|
||||
@@ -52,8 +52,7 @@ type Manager struct {
|
||||
stopChan chan struct{}
|
||||
wg sync.WaitGroup
|
||||
|
||||
subscribers map[string]chan State
|
||||
subMutex sync.RWMutex
|
||||
subscribers sync.Map
|
||||
dirty chan struct{}
|
||||
notifierWg sync.WaitGroup
|
||||
lastNotified *State
|
||||
@@ -95,19 +94,19 @@ func (m *Manager) GetState() State {
|
||||
|
||||
func (m *Manager) Subscribe(id string) chan State {
|
||||
ch := make(chan State, 64)
|
||||
m.subMutex.Lock()
|
||||
m.subscribers[id] = ch
|
||||
m.subMutex.Unlock()
|
||||
|
||||
m.subscribers.Store(id, ch)
|
||||
|
||||
return ch
|
||||
}
|
||||
|
||||
func (m *Manager) Unsubscribe(id string) {
|
||||
m.subMutex.Lock()
|
||||
if ch, ok := m.subscribers[id]; ok {
|
||||
close(ch)
|
||||
delete(m.subscribers, id)
|
||||
|
||||
if val, ok := m.subscribers.LoadAndDelete(id); ok {
|
||||
close(val.(chan State))
|
||||
|
||||
}
|
||||
m.subMutex.Unlock()
|
||||
|
||||
}
|
||||
|
||||
func (m *Manager) notifySubscribers() {
|
||||
|
||||
@@ -29,8 +29,6 @@ func NewManager() (*Manager, error) {
|
||||
systemConn: systemConn,
|
||||
sessionConn: sessionConn,
|
||||
currentUID: uint64(os.Getuid()),
|
||||
subscribers: make(map[string]chan FreedeskState),
|
||||
subMutex: sync.RWMutex{},
|
||||
}
|
||||
|
||||
m.initializeAccounts()
|
||||
@@ -206,41 +204,35 @@ func (m *Manager) GetState() FreedeskState {
|
||||
|
||||
func (m *Manager) Subscribe(id string) chan FreedeskState {
|
||||
ch := make(chan FreedeskState, 64)
|
||||
m.subMutex.Lock()
|
||||
m.subscribers[id] = ch
|
||||
m.subMutex.Unlock()
|
||||
m.subscribers.Store(id, ch)
|
||||
return ch
|
||||
}
|
||||
|
||||
func (m *Manager) Unsubscribe(id string) {
|
||||
m.subMutex.Lock()
|
||||
if ch, ok := m.subscribers[id]; ok {
|
||||
close(ch)
|
||||
delete(m.subscribers, id)
|
||||
if val, ok := m.subscribers.LoadAndDelete(id); ok {
|
||||
close(val.(chan FreedeskState))
|
||||
}
|
||||
m.subMutex.Unlock()
|
||||
}
|
||||
|
||||
func (m *Manager) NotifySubscribers() {
|
||||
m.subMutex.RLock()
|
||||
defer m.subMutex.RUnlock()
|
||||
|
||||
state := m.GetState()
|
||||
for _, ch := range m.subscribers {
|
||||
m.subscribers.Range(func(key, value interface{}) bool {
|
||||
ch := value.(chan FreedeskState)
|
||||
select {
|
||||
case ch <- state:
|
||||
default:
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
func (m *Manager) Close() {
|
||||
m.subMutex.Lock()
|
||||
for id, ch := range m.subscribers {
|
||||
m.subscribers.Range(func(key, value interface{}) bool {
|
||||
ch := value.(chan FreedeskState)
|
||||
close(ch)
|
||||
delete(m.subscribers, id)
|
||||
}
|
||||
m.subMutex.Unlock()
|
||||
m.subscribers.Delete(key)
|
||||
return true
|
||||
})
|
||||
|
||||
if m.systemConn != nil {
|
||||
m.systemConn.Close()
|
||||
|
||||
@@ -41,6 +41,5 @@ type Manager struct {
|
||||
accountsObj dbus.BusObject
|
||||
settingsObj dbus.BusObject
|
||||
currentUID uint64
|
||||
subscribers map[string]chan FreedeskState
|
||||
subMutex sync.RWMutex
|
||||
subscribers sync.Map
|
||||
}
|
||||
|
||||
@@ -466,9 +466,7 @@ func TestHandleSubscribe(t *testing.T) {
|
||||
SessionID: "1",
|
||||
Locked: false,
|
||||
},
|
||||
stateMutex: sync.RWMutex{},
|
||||
subscribers: make(map[string]chan SessionState),
|
||||
subMutex: sync.RWMutex{},
|
||||
stateMutex: sync.RWMutex{},
|
||||
}
|
||||
|
||||
conn := newMockNetConn()
|
||||
|
||||
@@ -25,13 +25,12 @@ func NewManager() (*Manager, error) {
|
||||
state: &SessionState{
|
||||
SessionID: sessionID,
|
||||
},
|
||||
stateMutex: sync.RWMutex{},
|
||||
subscribers: make(map[string]chan SessionState),
|
||||
subMutex: sync.RWMutex{},
|
||||
stopChan: make(chan struct{}),
|
||||
conn: conn,
|
||||
dirty: make(chan struct{}, 1),
|
||||
signals: make(chan *dbus.Signal, 256),
|
||||
stateMutex: sync.RWMutex{},
|
||||
|
||||
stopChan: make(chan struct{}),
|
||||
conn: conn,
|
||||
dirty: make(chan struct{}, 1),
|
||||
signals: make(chan *dbus.Signal, 256),
|
||||
}
|
||||
m.sleepInhibitorEnabled.Store(true)
|
||||
|
||||
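The commit message also mentions atomic values: the session manager's sleep-inhibitor flag, for example, is now toggled with Store rather than guarded by a mutex. A minimal sketch of that pattern, assuming an atomic.Bool field — the diff only shows the Store call, so the exact field type is an assumption:

```go
package session

import "sync/atomic"

// Manager is a stand-in; only the flag matters for this sketch.
type Manager struct {
	sleepInhibitorEnabled atomic.Bool
}

// SetSleepInhibitor flips the flag without any lock.
func (m *Manager) SetSleepInhibitor(enabled bool) {
	m.sleepInhibitorEnabled.Store(enabled)
}

// SleepInhibitorEnabled can be read from any goroutine.
func (m *Manager) SleepInhibitorEnabled() bool {
	return m.sleepInhibitorEnabled.Load()
}
```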
@@ -351,19 +350,14 @@ func (m *Manager) GetState() SessionState {
|
||||
|
||||
func (m *Manager) Subscribe(id string) chan SessionState {
|
||||
ch := make(chan SessionState, 64)
|
||||
m.subMutex.Lock()
|
||||
m.subscribers[id] = ch
|
||||
m.subMutex.Unlock()
|
||||
m.subscribers.Store(id, ch)
|
||||
return ch
|
||||
}
|
||||
|
||||
func (m *Manager) Unsubscribe(id string) {
|
||||
m.subMutex.Lock()
|
||||
if ch, ok := m.subscribers[id]; ok {
|
||||
close(ch)
|
||||
delete(m.subscribers, id)
|
||||
if val, ok := m.subscribers.LoadAndDelete(id); ok {
|
||||
close(val.(chan SessionState))
|
||||
}
|
||||
m.subMutex.Unlock()
|
||||
}
|
||||
|
||||
func (m *Manager) notifier() {
|
||||
@@ -387,28 +381,22 @@ func (m *Manager) notifier() {
|
||||
if !pending {
|
||||
continue
|
||||
}
|
||||
m.subMutex.RLock()
|
||||
if len(m.subscribers) == 0 {
|
||||
m.subMutex.RUnlock()
|
||||
pending = false
|
||||
continue
|
||||
}
|
||||
|
||||
currentState := m.snapshotState()
|
||||
|
||||
if m.lastNotifiedState != nil && !stateChangedMeaningfully(m.lastNotifiedState, ¤tState) {
|
||||
m.subMutex.RUnlock()
|
||||
pending = false
|
||||
continue
|
||||
}
|
||||
|
||||
for _, ch := range m.subscribers {
|
||||
m.subscribers.Range(func(key, value interface{}) bool {
|
||||
ch := value.(chan SessionState)
|
||||
select {
|
||||
case ch <- currentState:
|
||||
default:
|
||||
}
|
||||
}
|
||||
m.subMutex.RUnlock()
|
||||
return true
|
||||
})
|
||||
|
||||
stateCopy := currentState
|
||||
m.lastNotifiedState = &stateCopy
|
||||
@@ -584,12 +572,12 @@ func (m *Manager) Close() {
|
||||
|
||||
m.releaseSleepInhibitor()
|
||||
|
||||
m.subMutex.Lock()
|
||||
for _, ch := range m.subscribers {
|
||||
m.subscribers.Range(func(key, value interface{}) bool {
|
||||
ch := value.(chan SessionState)
|
||||
close(ch)
|
||||
}
|
||||
m.subscribers = make(map[string]chan SessionState)
|
||||
m.subMutex.Unlock()
|
||||
m.subscribers.Delete(key)
|
||||
return true
|
||||
})
|
||||
|
||||
if m.conn != nil {
|
||||
m.conn.Close()
|
||||
|
||||
@@ -34,26 +34,20 @@ func TestManager_GetState(t *testing.T) {
|
||||
|
||||
func TestManager_Subscribe(t *testing.T) {
|
||||
manager := &Manager{
|
||||
state: &SessionState{},
|
||||
subscribers: make(map[string]chan SessionState),
|
||||
subMutex: sync.RWMutex{},
|
||||
state: &SessionState{},
|
||||
}
|
||||
|
||||
ch := manager.Subscribe("test-client")
|
||||
assert.NotNil(t, ch)
|
||||
assert.Equal(t, 64, cap(ch))
|
||||
|
||||
manager.subMutex.RLock()
|
||||
_, exists := manager.subscribers["test-client"]
|
||||
manager.subMutex.RUnlock()
|
||||
_, exists := manager.subscribers.Load("test-client")
|
||||
assert.True(t, exists)
|
||||
}
|
||||
|
||||
func TestManager_Unsubscribe(t *testing.T) {
|
||||
manager := &Manager{
|
||||
state: &SessionState{},
|
||||
subscribers: make(map[string]chan SessionState),
|
||||
subMutex: sync.RWMutex{},
|
||||
state: &SessionState{},
|
||||
}
|
||||
|
||||
ch := manager.Subscribe("test-client")
|
||||
@@ -63,17 +57,13 @@ func TestManager_Unsubscribe(t *testing.T) {
|
||||
_, ok := <-ch
|
||||
assert.False(t, ok)
|
||||
|
||||
manager.subMutex.RLock()
|
||||
_, exists := manager.subscribers["test-client"]
|
||||
manager.subMutex.RUnlock()
|
||||
_, exists := manager.subscribers.Load("test-client")
|
||||
assert.False(t, exists)
|
||||
}
|
||||
|
||||
func TestManager_Unsubscribe_NonExistent(t *testing.T) {
|
||||
manager := &Manager{
|
||||
state: &SessionState{},
|
||||
subscribers: make(map[string]chan SessionState),
|
||||
subMutex: sync.RWMutex{},
|
||||
state: &SessionState{},
|
||||
}
|
||||
|
||||
// Unsubscribe a non-existent client should not panic
|
||||
@@ -88,19 +78,15 @@ func TestManager_NotifySubscribers(t *testing.T) {
|
||||
SessionID: "1",
|
||||
Locked: false,
|
||||
},
|
||||
stateMutex: sync.RWMutex{},
|
||||
subscribers: make(map[string]chan SessionState),
|
||||
subMutex: sync.RWMutex{},
|
||||
stopChan: make(chan struct{}),
|
||||
dirty: make(chan struct{}, 1),
|
||||
stateMutex: sync.RWMutex{},
|
||||
stopChan: make(chan struct{}),
|
||||
dirty: make(chan struct{}, 1),
|
||||
}
|
||||
manager.notifierWg.Add(1)
|
||||
go manager.notifier()
|
||||
|
||||
ch := make(chan SessionState, 10)
|
||||
manager.subMutex.Lock()
|
||||
manager.subscribers["test-client"] = ch
|
||||
manager.subMutex.Unlock()
|
||||
manager.subscribers.Store("test-client", ch)
|
||||
|
||||
manager.notifySubscribers()
|
||||
|
||||
@@ -122,19 +108,15 @@ func TestManager_NotifySubscribers_Debounce(t *testing.T) {
|
||||
SessionID: "1",
|
||||
Locked: false,
|
||||
},
|
||||
stateMutex: sync.RWMutex{},
|
||||
subscribers: make(map[string]chan SessionState),
|
||||
subMutex: sync.RWMutex{},
|
||||
stopChan: make(chan struct{}),
|
||||
dirty: make(chan struct{}, 1),
|
||||
stateMutex: sync.RWMutex{},
|
||||
stopChan: make(chan struct{}),
|
||||
dirty: make(chan struct{}, 1),
|
||||
}
|
||||
manager.notifierWg.Add(1)
|
||||
go manager.notifier()
|
||||
|
||||
ch := make(chan SessionState, 10)
|
||||
manager.subMutex.Lock()
|
||||
manager.subscribers["test-client"] = ch
|
||||
manager.subMutex.Unlock()
|
||||
manager.subscribers.Store("test-client", ch)
|
||||
|
||||
manager.notifySubscribers()
|
||||
manager.notifySubscribers()
|
||||
@@ -157,19 +139,15 @@ func TestManager_NotifySubscribers_Debounce(t *testing.T) {
|
||||
|
||||
func TestManager_Close(t *testing.T) {
|
||||
manager := &Manager{
|
||||
state: &SessionState{},
|
||||
stateMutex: sync.RWMutex{},
|
||||
subscribers: make(map[string]chan SessionState),
|
||||
subMutex: sync.RWMutex{},
|
||||
stopChan: make(chan struct{}),
|
||||
state: &SessionState{},
|
||||
stateMutex: sync.RWMutex{},
|
||||
stopChan: make(chan struct{}),
|
||||
}
|
||||
|
||||
ch1 := make(chan SessionState, 1)
|
||||
ch2 := make(chan SessionState, 1)
|
||||
manager.subMutex.Lock()
|
||||
manager.subscribers["client1"] = ch1
|
||||
manager.subscribers["client2"] = ch2
|
||||
manager.subMutex.Unlock()
|
||||
manager.subscribers.Store("client1", ch1)
|
||||
manager.subscribers.Store("client2", ch2)
|
||||
|
||||
manager.Close()
|
||||
|
||||
@@ -184,7 +162,12 @@ func TestManager_Close(t *testing.T) {
|
||||
assert.False(t, ok1, "ch1 should be closed")
|
||||
assert.False(t, ok2, "ch2 should be closed")
|
||||
|
||||
assert.Len(t, manager.subscribers, 0)
|
||||
count := 0
|
||||
manager.subscribers.Range(func(key, value interface{}) bool {
|
||||
count++
|
||||
return true
|
||||
})
|
||||
assert.Equal(t, 0, count)
|
||||
}
|
||||
|
||||
func TestManager_GetState_ThreadSafe(t *testing.T) {
|
||||
|
||||
@@ -14,10 +14,8 @@ func TestManager_HandleDBusSignal_Lock(t *testing.T) {
|
||||
Locked: false,
|
||||
LockedHint: false,
|
||||
},
|
||||
stateMutex: sync.RWMutex{},
|
||||
subscribers: make(map[string]chan SessionState),
|
||||
subMutex: sync.RWMutex{},
|
||||
dirty: make(chan struct{}, 1),
|
||||
stateMutex: sync.RWMutex{},
|
||||
dirty: make(chan struct{}, 1),
|
||||
}
|
||||
|
||||
sig := &dbus.Signal{
|
||||
@@ -38,10 +36,8 @@ func TestManager_HandleDBusSignal_Unlock(t *testing.T) {
|
||||
Locked: true,
|
||||
LockedHint: true,
|
||||
},
|
||||
stateMutex: sync.RWMutex{},
|
||||
subscribers: make(map[string]chan SessionState),
|
||||
subMutex: sync.RWMutex{},
|
||||
dirty: make(chan struct{}, 1),
|
||||
stateMutex: sync.RWMutex{},
|
||||
dirty: make(chan struct{}, 1),
|
||||
}
|
||||
|
||||
sig := &dbus.Signal{
|
||||
@@ -62,10 +58,8 @@ func TestManager_HandleDBusSignal_PrepareForSleep(t *testing.T) {
|
||||
state: &SessionState{
|
||||
PreparingForSleep: false,
|
||||
},
|
||||
stateMutex: sync.RWMutex{},
|
||||
subscribers: make(map[string]chan SessionState),
|
||||
subMutex: sync.RWMutex{},
|
||||
dirty: make(chan struct{}, 1),
|
||||
stateMutex: sync.RWMutex{},
|
||||
dirty: make(chan struct{}, 1),
|
||||
}
|
||||
|
||||
sig := &dbus.Signal{
|
||||
@@ -85,10 +79,8 @@ func TestManager_HandleDBusSignal_PrepareForSleep(t *testing.T) {
|
||||
state: &SessionState{
|
||||
PreparingForSleep: true,
|
||||
},
|
||||
stateMutex: sync.RWMutex{},
|
||||
subscribers: make(map[string]chan SessionState),
|
||||
subMutex: sync.RWMutex{},
|
||||
dirty: make(chan struct{}, 1),
|
||||
stateMutex: sync.RWMutex{},
|
||||
dirty: make(chan struct{}, 1),
|
||||
}
|
||||
|
||||
sig := &dbus.Signal{
|
||||
@@ -108,10 +100,8 @@ func TestManager_HandleDBusSignal_PrepareForSleep(t *testing.T) {
|
||||
state: &SessionState{
|
||||
PreparingForSleep: false,
|
||||
},
|
||||
stateMutex: sync.RWMutex{},
|
||||
subscribers: make(map[string]chan SessionState),
|
||||
subMutex: sync.RWMutex{},
|
||||
dirty: make(chan struct{}, 1),
|
||||
stateMutex: sync.RWMutex{},
|
||||
dirty: make(chan struct{}, 1),
|
||||
}
|
||||
|
||||
sig := &dbus.Signal{
|
||||
@@ -133,10 +123,8 @@ func TestManager_HandlePropertiesChanged(t *testing.T) {
|
||||
state: &SessionState{
|
||||
Active: false,
|
||||
},
|
||||
stateMutex: sync.RWMutex{},
|
||||
subscribers: make(map[string]chan SessionState),
|
||||
subMutex: sync.RWMutex{},
|
||||
dirty: make(chan struct{}, 1),
|
||||
stateMutex: sync.RWMutex{},
|
||||
dirty: make(chan struct{}, 1),
|
||||
}
|
||||
|
||||
sig := &dbus.Signal{
|
||||
@@ -161,10 +149,8 @@ func TestManager_HandlePropertiesChanged(t *testing.T) {
|
||||
state: &SessionState{
|
||||
IdleHint: false,
|
||||
},
|
||||
stateMutex: sync.RWMutex{},
|
||||
subscribers: make(map[string]chan SessionState),
|
||||
subMutex: sync.RWMutex{},
|
||||
dirty: make(chan struct{}, 1),
|
||||
stateMutex: sync.RWMutex{},
|
||||
dirty: make(chan struct{}, 1),
|
||||
}
|
||||
|
||||
sig := &dbus.Signal{
|
||||
@@ -189,10 +175,8 @@ func TestManager_HandlePropertiesChanged(t *testing.T) {
|
||||
state: &SessionState{
|
||||
IdleSinceHint: 0,
|
||||
},
|
||||
stateMutex: sync.RWMutex{},
|
||||
subscribers: make(map[string]chan SessionState),
|
||||
subMutex: sync.RWMutex{},
|
||||
dirty: make(chan struct{}, 1),
|
||||
stateMutex: sync.RWMutex{},
|
||||
dirty: make(chan struct{}, 1),
|
||||
}
|
||||
|
||||
sig := &dbus.Signal{
|
||||
@@ -218,10 +202,8 @@ func TestManager_HandlePropertiesChanged(t *testing.T) {
|
||||
LockedHint: false,
|
||||
Locked: false,
|
||||
},
|
||||
stateMutex: sync.RWMutex{},
|
||||
subscribers: make(map[string]chan SessionState),
|
||||
subMutex: sync.RWMutex{},
|
||||
dirty: make(chan struct{}, 1),
|
||||
stateMutex: sync.RWMutex{},
|
||||
dirty: make(chan struct{}, 1),
|
||||
}
|
||||
|
||||
sig := &dbus.Signal{
|
||||
@@ -247,10 +229,8 @@ func TestManager_HandlePropertiesChanged(t *testing.T) {
|
||||
state: &SessionState{
|
||||
Active: false,
|
||||
},
|
||||
stateMutex: sync.RWMutex{},
|
||||
subscribers: make(map[string]chan SessionState),
|
||||
subMutex: sync.RWMutex{},
|
||||
dirty: make(chan struct{}, 1),
|
||||
stateMutex: sync.RWMutex{},
|
||||
dirty: make(chan struct{}, 1),
|
||||
}
|
||||
|
||||
sig := &dbus.Signal{
|
||||
@@ -272,11 +252,9 @@ func TestManager_HandlePropertiesChanged(t *testing.T) {
|
||||
|
||||
t.Run("empty body", func(t *testing.T) {
|
||||
manager := &Manager{
|
||||
state: &SessionState{},
|
||||
stateMutex: sync.RWMutex{},
|
||||
subscribers: make(map[string]chan SessionState),
|
||||
subMutex: sync.RWMutex{},
|
||||
dirty: make(chan struct{}, 1),
|
||||
state: &SessionState{},
|
||||
stateMutex: sync.RWMutex{},
|
||||
dirty: make(chan struct{}, 1),
|
||||
}
|
||||
|
||||
sig := &dbus.Signal{
|
||||
@@ -295,10 +273,8 @@ func TestManager_HandlePropertiesChanged(t *testing.T) {
|
||||
Active: false,
|
||||
IdleHint: false,
|
||||
},
|
||||
stateMutex: sync.RWMutex{},
|
||||
subscribers: make(map[string]chan SessionState),
|
||||
subMutex: sync.RWMutex{},
|
||||
dirty: make(chan struct{}, 1),
|
||||
stateMutex: sync.RWMutex{},
|
||||
dirty: make(chan struct{}, 1),
|
||||
}
|
||||
|
||||
sig := &dbus.Signal{
|
||||
|
||||
@@ -50,8 +50,7 @@ type SessionEvent struct {
|
||||
type Manager struct {
|
||||
state *SessionState
|
||||
stateMutex sync.RWMutex
|
||||
subscribers map[string]chan SessionState
|
||||
subMutex sync.RWMutex
|
||||
subscribers sync.Map
|
||||
stopChan chan struct{}
|
||||
conn *dbus.Conn
|
||||
sessionPath dbus.ObjectPath
|
||||
|
||||
@@ -240,19 +240,25 @@ func TestHandleSubscribe(t *testing.T) {
|
||||
|
||||
func TestManager_Subscribe_Unsubscribe(t *testing.T) {
|
||||
manager := &Manager{
|
||||
state: &NetworkState{},
|
||||
subscribers: make(map[string]chan NetworkState),
|
||||
state: &NetworkState{},
|
||||
}
|
||||
|
||||
t.Run("subscribe creates channel", func(t *testing.T) {
|
||||
ch := manager.Subscribe("client1")
|
||||
assert.NotNil(t, ch)
|
||||
assert.Len(t, manager.subscribers, 1)
|
||||
count := 0
|
||||
manager.subscribers.Range(func(key, value interface{}) bool {
|
||||
count++
|
||||
return true
|
||||
})
|
||||
assert.Equal(t, 1, count)
|
||||
})
|
||||
|
||||
t.Run("unsubscribe removes channel", func(t *testing.T) {
|
||||
manager.Unsubscribe("client1")
|
||||
assert.Len(t, manager.subscribers, 0)
|
||||
count := 0
|
||||
manager.subscribers.Range(func(key, value interface{}) bool { count++; return true })
|
||||
assert.Equal(t, 0, count)
|
||||
})
|
||||
|
||||
t.Run("unsubscribe non-existent client is safe", func(t *testing.T) {
|
||||
|
||||
@@ -66,9 +66,8 @@ func NewManager() (*Manager, error) {
|
||||
Preference: PreferenceAuto,
|
||||
WiFiNetworks: []WiFiNetwork{},
|
||||
},
|
||||
stateMutex: sync.RWMutex{},
|
||||
subscribers: make(map[string]chan NetworkState),
|
||||
subMutex: sync.RWMutex{},
|
||||
stateMutex: sync.RWMutex{},
|
||||
|
||||
stopChan: make(chan struct{}),
|
||||
dirty: make(chan struct{}, 1),
|
||||
credentialSubscribers: make(map[string]chan CredentialPrompt),
|
||||
@@ -270,19 +269,14 @@ func (m *Manager) GetState() NetworkState {
|
||||
|
||||
func (m *Manager) Subscribe(id string) chan NetworkState {
|
||||
ch := make(chan NetworkState, 64)
|
||||
m.subMutex.Lock()
|
||||
m.subscribers[id] = ch
|
||||
m.subMutex.Unlock()
|
||||
m.subscribers.Store(id, ch)
|
||||
return ch
|
||||
}
|
||||
|
||||
func (m *Manager) Unsubscribe(id string) {
|
||||
m.subMutex.Lock()
|
||||
if ch, ok := m.subscribers[id]; ok {
|
||||
close(ch)
|
||||
delete(m.subscribers, id)
|
||||
if val, ok := m.subscribers.LoadAndDelete(id); ok {
|
||||
close(val.(chan NetworkState))
|
||||
}
|
||||
m.subMutex.Unlock()
|
||||
}
|
||||
|
||||
func (m *Manager) SubscribeCredentials(id string) chan CredentialPrompt {
|
||||
@@ -335,28 +329,22 @@ func (m *Manager) notifier() {
|
||||
if !pending {
|
||||
continue
|
||||
}
|
||||
m.subMutex.RLock()
|
||||
if len(m.subscribers) == 0 {
|
||||
m.subMutex.RUnlock()
|
||||
pending = false
|
||||
continue
|
||||
}
|
||||
|
||||
currentState := m.snapshotState()
|
||||
|
||||
if m.lastNotifiedState != nil && !stateChangedMeaningfully(m.lastNotifiedState, ¤tState) {
|
||||
m.subMutex.RUnlock()
|
||||
pending = false
|
||||
continue
|
||||
}
|
||||
|
||||
for _, ch := range m.subscribers {
|
||||
m.subscribers.Range(func(key, value interface{}) bool {
|
||||
ch := value.(chan NetworkState)
|
||||
select {
|
||||
case ch <- currentState:
|
||||
default:
|
||||
}
|
||||
}
|
||||
m.subMutex.RUnlock()
|
||||
return true
|
||||
})
|
||||
|
||||
stateCopy := currentState
|
||||
m.lastNotifiedState = &stateCopy
|
||||
@@ -396,12 +384,12 @@ func (m *Manager) Close() {
|
||||
m.backend.Close()
|
||||
}
|
||||
|
||||
m.subMutex.Lock()
|
||||
for _, ch := range m.subscribers {
|
||||
m.subscribers.Range(func(key, value interface{}) bool {
|
||||
ch := value.(chan NetworkState)
|
||||
close(ch)
|
||||
}
|
||||
m.subscribers = make(map[string]chan NetworkState)
|
||||
m.subMutex.Unlock()
|
||||
m.subscribers.Delete(key)
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
func (m *Manager) ScanWiFi() error {
|
||||
|
||||
@@ -31,19 +31,15 @@ func TestManager_NotifySubscribers(t *testing.T) {
|
||||
state: &NetworkState{
|
||||
NetworkStatus: StatusWiFi,
|
||||
},
|
||||
stateMutex: sync.RWMutex{},
|
||||
subscribers: make(map[string]chan NetworkState),
|
||||
subMutex: sync.RWMutex{},
|
||||
stopChan: make(chan struct{}),
|
||||
dirty: make(chan struct{}, 1),
|
||||
stateMutex: sync.RWMutex{},
|
||||
stopChan: make(chan struct{}),
|
||||
dirty: make(chan struct{}, 1),
|
||||
}
|
||||
manager.notifierWg.Add(1)
|
||||
go manager.notifier()
|
||||
|
||||
ch := make(chan NetworkState, 10)
|
||||
manager.subMutex.Lock()
|
||||
manager.subscribers["test-client"] = ch
|
||||
manager.subMutex.Unlock()
|
||||
manager.subscribers.Store("test-client", ch)
|
||||
|
||||
manager.notifySubscribers()
|
||||
|
||||
@@ -63,19 +59,15 @@ func TestManager_NotifySubscribers_Debounce(t *testing.T) {
|
||||
state: &NetworkState{
|
||||
NetworkStatus: StatusWiFi,
|
||||
},
|
||||
stateMutex: sync.RWMutex{},
|
||||
subscribers: make(map[string]chan NetworkState),
|
||||
subMutex: sync.RWMutex{},
|
||||
stopChan: make(chan struct{}),
|
||||
dirty: make(chan struct{}, 1),
|
||||
stateMutex: sync.RWMutex{},
|
||||
stopChan: make(chan struct{}),
|
||||
dirty: make(chan struct{}, 1),
|
||||
}
|
||||
manager.notifierWg.Add(1)
|
||||
go manager.notifier()
|
||||
|
||||
ch := make(chan NetworkState, 10)
|
||||
manager.subMutex.Lock()
|
||||
manager.subscribers["test-client"] = ch
|
||||
manager.subMutex.Unlock()
|
||||
manager.subscribers.Store("test-client", ch)
|
||||
|
||||
manager.notifySubscribers()
|
||||
manager.notifySubscribers()
|
||||
@@ -98,19 +90,15 @@ func TestManager_NotifySubscribers_Debounce(t *testing.T) {
|
||||
|
||||
func TestManager_Close(t *testing.T) {
|
||||
manager := &Manager{
|
||||
state: &NetworkState{},
|
||||
stateMutex: sync.RWMutex{},
|
||||
subscribers: make(map[string]chan NetworkState),
|
||||
subMutex: sync.RWMutex{},
|
||||
stopChan: make(chan struct{}),
|
||||
state: &NetworkState{},
|
||||
stateMutex: sync.RWMutex{},
|
||||
stopChan: make(chan struct{}),
|
||||
}
|
||||
|
||||
ch1 := make(chan NetworkState, 1)
|
||||
ch2 := make(chan NetworkState, 1)
|
||||
manager.subMutex.Lock()
|
||||
manager.subscribers["client1"] = ch1
|
||||
manager.subscribers["client2"] = ch2
|
||||
manager.subMutex.Unlock()
|
||||
manager.subscribers.Store("client1", ch1)
|
||||
manager.subscribers.Store("client2", ch2)
|
||||
|
||||
manager.Close()
|
||||
|
||||
@@ -125,31 +113,27 @@ func TestManager_Close(t *testing.T) {
|
||||
assert.False(t, ok1, "ch1 should be closed")
|
||||
assert.False(t, ok2, "ch2 should be closed")
|
||||
|
||||
assert.Len(t, manager.subscribers, 0)
|
||||
count := 0
|
||||
manager.subscribers.Range(func(key, value interface{}) bool { count++; return true })
|
||||
assert.Equal(t, 0, count)
|
||||
}
|
||||
|
||||
func TestManager_Subscribe(t *testing.T) {
|
||||
manager := &Manager{
|
||||
state: &NetworkState{},
|
||||
subscribers: make(map[string]chan NetworkState),
|
||||
subMutex: sync.RWMutex{},
|
||||
state: &NetworkState{},
|
||||
}
|
||||
|
||||
ch := manager.Subscribe("test-client")
|
||||
assert.NotNil(t, ch)
|
||||
assert.Equal(t, 64, cap(ch))
|
||||
|
||||
manager.subMutex.RLock()
|
||||
_, exists := manager.subscribers["test-client"]
|
||||
manager.subMutex.RUnlock()
|
||||
_, exists := manager.subscribers.Load("test-client")
|
||||
assert.True(t, exists)
|
||||
}
|
||||
|
||||
func TestManager_Unsubscribe(t *testing.T) {
|
||||
manager := &Manager{
|
||||
state: &NetworkState{},
|
||||
subscribers: make(map[string]chan NetworkState),
|
||||
subMutex: sync.RWMutex{},
|
||||
state: &NetworkState{},
|
||||
}
|
||||
|
||||
ch := manager.Subscribe("test-client")
|
||||
@@ -159,9 +143,7 @@ func TestManager_Unsubscribe(t *testing.T) {
|
||||
_, ok := <-ch
|
||||
assert.False(t, ok)
|
||||
|
||||
manager.subMutex.RLock()
|
||||
_, exists := manager.subscribers["test-client"]
|
||||
manager.subMutex.RUnlock()
|
||||
_, exists := manager.subscribers.Load("test-client")
|
||||
assert.False(t, exists)
|
||||
}
|
||||
|
||||
|
||||
@@ -6,10 +6,9 @@ func NewTestManager(backend Backend, state *NetworkState) *Manager {
|
||||
state = &NetworkState{}
|
||||
}
|
||||
return &Manager{
|
||||
backend: backend,
|
||||
state: state,
|
||||
subscribers: make(map[string]chan NetworkState),
|
||||
stopChan: make(chan struct{}),
|
||||
dirty: make(chan struct{}, 1),
|
||||
backend: backend,
|
||||
state: state,
|
||||
stopChan: make(chan struct{}),
|
||||
dirty: make(chan struct{}, 1),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -108,8 +108,7 @@ type Manager struct {
|
||||
backend Backend
|
||||
state *NetworkState
|
||||
stateMutex sync.RWMutex
|
||||
subscribers map[string]chan NetworkState
|
||||
subMutex sync.RWMutex
|
||||
subscribers sync.Map
|
||||
stopChan chan struct{}
|
||||
dirty chan struct{}
|
||||
notifierWg sync.WaitGroup
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
@@ -58,11 +59,9 @@ var wlrOutputManager *wlroutput.Manager
|
||||
var evdevManager *evdev.Manager
|
||||
var wlContext *wlcontext.SharedContext
|
||||
|
||||
var capabilitySubscribers = make(map[string]chan ServerInfo)
|
||||
var capabilityMutex sync.RWMutex
|
||||
|
||||
var cupsSubscribers = make(map[string]bool)
|
||||
var cupsSubscribersMutex sync.Mutex
|
||||
var capabilitySubscribers sync.Map
|
||||
var cupsSubscribers sync.Map
|
||||
var cupsSubscriberCount atomic.Int32
|
||||
|
||||
func getSocketDir() string {
|
||||
if runtime := os.Getenv("XDG_RUNTIME_DIR"); runtime != "" {
|
||||
@@ -434,16 +433,15 @@ func getServerInfo() ServerInfo {
|
||||
}
|
||||
|
||||
func notifyCapabilityChange() {
|
||||
capabilityMutex.RLock()
|
||||
defer capabilityMutex.RUnlock()
|
||||
|
||||
info := getServerInfo()
|
||||
for _, ch := range capabilitySubscribers {
|
||||
capabilitySubscribers.Range(func(key, value interface{}) bool {
|
||||
ch := value.(chan ServerInfo)
|
||||
select {
|
||||
case ch <- info:
|
||||
default:
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
func handleSubscribe(conn net.Conn, req models.Request) {
|
||||
@@ -475,18 +473,12 @@ func handleSubscribe(conn net.Conn, req models.Request) {
|
||||
stopChan := make(chan struct{})
|
||||
|
||||
capChan := make(chan ServerInfo, 64)
|
||||
capabilityMutex.Lock()
|
||||
capabilitySubscribers[clientID+"-capabilities"] = capChan
|
||||
capabilityMutex.Unlock()
|
||||
capabilitySubscribers.Store(clientID+"-capabilities", capChan)
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
defer func() {
|
||||
capabilityMutex.Lock()
|
||||
delete(capabilitySubscribers, clientID+"-capabilities")
|
||||
capabilityMutex.Unlock()
|
||||
}()
|
||||
defer capabilitySubscribers.Delete(clientID + "-capabilities")
|
||||
|
||||
for {
|
||||
select {
|
||||
@@ -728,12 +720,10 @@ func handleSubscribe(conn net.Conn, req models.Request) {
|
||||
}
|
||||
|
||||
if shouldSubscribe("cups") {
|
||||
cupsSubscribersMutex.Lock()
|
||||
wasEmpty := len(cupsSubscribers) == 0
|
||||
cupsSubscribers[clientID+"-cups"] = true
|
||||
cupsSubscribersMutex.Unlock()
|
||||
cupsSubscribers.Store(clientID+"-cups", true)
|
||||
count := cupsSubscriberCount.Add(1)
|
||||
|
||||
if wasEmpty {
|
||||
if count == 1 {
|
||||
if err := InitializeCupsManager(); err != nil {
|
||||
log.Warnf("Failed to initialize CUPS manager for subscription: %v", err)
|
||||
} else {
|
||||
@@ -748,13 +738,10 @@ func handleSubscribe(conn net.Conn, req models.Request) {
|
||||
defer wg.Done()
|
||||
defer func() {
|
||||
cupsManager.Unsubscribe(clientID + "-cups")
|
||||
cupsSubscribers.Delete(clientID + "-cups")
|
||||
count := cupsSubscriberCount.Add(-1)
|
||||
|
||||
cupsSubscribersMutex.Lock()
|
||||
delete(cupsSubscribers, clientID+"-cups")
|
||||
isEmpty := len(cupsSubscribers) == 0
|
||||
cupsSubscribersMutex.Unlock()
|
||||
|
||||
if isEmpty {
|
||||
if count == 0 {
|
||||
log.Info("Last CUPS subscriber disconnected, shutting down CUPS manager")
|
||||
if cupsManager != nil {
|
||||
cupsManager.Close()
|
||||
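The CUPS bookkeeping above replaces the locked map-length checks with `cupsSubscriberCount`, an `atomic.Int32`: the add that returns 1 initializes the shared manager, and the add of -1 that returns 0 tears it down. A standalone sketch of that reference-counting pattern (names are illustrative, not the project's):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// refCounted lazily starts a shared resource for its first user and stops it
// after the last user leaves, using only an atomic counter.
type refCounted struct {
	count atomic.Int32
}

func (r *refCounted) acquire() {
	if r.count.Add(1) == 1 {
		fmt.Println("first subscriber: initialize shared manager")
	}
}

func (r *refCounted) release() {
	if r.count.Add(-1) == 0 {
		fmt.Println("last subscriber gone: shut down shared manager")
	}
}

func main() {
	var r refCounted
	r.acquire() // prints the init message
	r.acquire() // second subscriber reuses the running manager
	r.release()
	r.release() // prints the shutdown message
}
```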
@@ -822,36 +809,46 @@ func handleSubscribe(conn net.Conn, req models.Request) {
|
||||
}()
|
||||
}
|
||||
|
||||
if shouldSubscribe("extworkspace") && extWorkspaceManager != nil {
|
||||
wg.Add(1)
|
||||
extWorkspaceChan := extWorkspaceManager.Subscribe(clientID + "-extworkspace")
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
defer extWorkspaceManager.Unsubscribe(clientID + "-extworkspace")
|
||||
|
||||
initialState := extWorkspaceManager.GetState()
|
||||
select {
|
||||
case eventChan <- ServiceEvent{Service: "extworkspace", Data: initialState}:
|
||||
case <-stopChan:
|
||||
return
|
||||
if shouldSubscribe("extworkspace") {
|
||||
if extWorkspaceManager == nil {
|
||||
if err := InitializeExtWorkspaceManager(); err != nil {
|
||||
log.Warnf("Failed to initialize ExtWorkspace manager for subscription: %v", err)
|
||||
} else {
|
||||
notifyCapabilityChange()
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
if extWorkspaceManager != nil {
|
||||
wg.Add(1)
|
||||
extWorkspaceChan := extWorkspaceManager.Subscribe(clientID + "-extworkspace")
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
defer extWorkspaceManager.Unsubscribe(clientID + "-extworkspace")
|
||||
|
||||
initialState := extWorkspaceManager.GetState()
|
||||
select {
|
||||
case state, ok := <-extWorkspaceChan:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case eventChan <- ServiceEvent{Service: "extworkspace", Data: state}:
|
||||
case <-stopChan:
|
||||
return
|
||||
}
|
||||
case eventChan <- ServiceEvent{Service: "extworkspace", Data: initialState}:
|
||||
case <-stopChan:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case state, ok := <-extWorkspaceChan:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case eventChan <- ServiceEvent{Service: "extworkspace", Data: state}:
|
||||
case <-stopChan:
|
||||
return
|
||||
}
|
||||
case <-stopChan:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
if shouldSubscribe("brightness") && brightnessManager != nil {
|
||||
@@ -1244,10 +1241,6 @@ func Start(printDocs bool) error {
|
||||
log.Debugf("DWL manager unavailable: %v", err)
|
||||
}
|
||||
|
||||
if err := InitializeExtWorkspaceManager(); err != nil {
|
||||
log.Debugf("ExtWorkspace manager unavailable: %v", err)
|
||||
}
|
||||
|
||||
if err := InitializeWlrOutputManager(); err != nil {
|
||||
log.Debugf("WlrOutput manager unavailable: %v", err)
|
||||
}
|
||||
|
||||
@@ -8,8 +8,8 @@ import (
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
wlclient "github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/wayland/client"
|
||||
"github.com/godbus/dbus/v5"
|
||||
wlclient "github.com/yaslama/go-wayland/wayland/client"
|
||||
"golang.org/x/sys/unix"
|
||||
|
||||
"github.com/AvengeMedia/DankMaterialShell/core/internal/errdefs"
|
||||
@@ -23,14 +23,14 @@ func NewManager(display *wlclient.Display, config Config) (*Manager, error) {
|
||||
}
|
||||
|
||||
m := &Manager{
|
||||
config: config,
|
||||
display: display,
|
||||
ctx: display.Context(),
|
||||
outputs: make(map[uint32]*outputState),
|
||||
cmdq: make(chan cmd, 128),
|
||||
stopChan: make(chan struct{}),
|
||||
updateTrigger: make(chan struct{}, 1),
|
||||
subscribers: make(map[string]chan State),
|
||||
config: config,
|
||||
display: display,
|
||||
ctx: display.Context(),
|
||||
outputs: make(map[uint32]*outputState),
|
||||
cmdq: make(chan cmd, 128),
|
||||
stopChan: make(chan struct{}),
|
||||
updateTrigger: make(chan struct{}, 1),
|
||||
|
||||
dirty: make(chan struct{}, 1),
|
||||
dbusSignal: make(chan *dbus.Signal, 16),
|
||||
transitionChan: make(chan int, 1),
|
||||
@@ -935,28 +935,22 @@ func (m *Manager) notifier() {
|
||||
if !pending {
|
||||
continue
|
||||
}
|
||||
m.subMutex.RLock()
|
||||
if len(m.subscribers) == 0 {
|
||||
m.subMutex.RUnlock()
|
||||
pending = false
|
||||
continue
|
||||
}
|
||||
|
||||
currentState := m.GetState()
|
||||
|
||||
if m.lastNotified != nil && !stateChanged(m.lastNotified, ¤tState) {
|
||||
m.subMutex.RUnlock()
|
||||
pending = false
|
||||
continue
|
||||
}
|
||||
|
||||
for _, ch := range m.subscribers {
|
||||
m.subscribers.Range(func(key, value interface{}) bool {
|
||||
ch := value.(chan State)
|
||||
select {
|
||||
case ch <- currentState:
|
||||
default:
|
||||
}
|
||||
}
|
||||
m.subMutex.RUnlock()
|
||||
return true
|
||||
})
|
||||
|
||||
stateCopy := currentState
|
||||
m.lastNotified = &stateCopy
|
||||
@@ -1332,12 +1326,12 @@ func (m *Manager) Close() {
|
||||
m.wg.Wait()
|
||||
m.notifierWg.Wait()
|
||||
|
||||
m.subMutex.Lock()
|
||||
for _, ch := range m.subscribers {
|
||||
m.subscribers.Range(func(key, value interface{}) bool {
|
||||
ch := value.(chan State)
|
||||
close(ch)
|
||||
}
|
||||
m.subscribers = make(map[string]chan State)
|
||||
m.subMutex.Unlock()
|
||||
m.subscribers.Delete(key)
|
||||
return true
|
||||
})
|
||||
|
||||
m.outputsMutex.Lock()
|
||||
for _, out := range m.outputs {
|
||||
|
||||
@@ -6,8 +6,8 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/AvengeMedia/DankMaterialShell/core/internal/errdefs"
|
||||
wlclient "github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/wayland/client"
|
||||
"github.com/godbus/dbus/v5"
|
||||
wlclient "github.com/yaslama/go-wayland/wayland/client"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
@@ -69,8 +69,7 @@ type Manager struct {
|
||||
cachedIPLon *float64
|
||||
locationMutex sync.RWMutex
|
||||
|
||||
subscribers map[string]chan State
|
||||
subMutex sync.RWMutex
|
||||
subscribers sync.Map
|
||||
dirty chan struct{}
|
||||
notifierWg sync.WaitGroup
|
||||
lastNotified *State
|
||||
@@ -147,19 +146,14 @@ func (m *Manager) GetState() State {
|
||||
|
||||
func (m *Manager) Subscribe(id string) chan State {
|
||||
ch := make(chan State, 64)
|
||||
m.subMutex.Lock()
|
||||
m.subscribers[id] = ch
|
||||
m.subMutex.Unlock()
|
||||
m.subscribers.Store(id, ch)
|
||||
return ch
|
||||
}
|
||||
|
||||
func (m *Manager) Unsubscribe(id string) {
|
||||
m.subMutex.Lock()
|
||||
if ch, ok := m.subscribers[id]; ok {
|
||||
close(ch)
|
||||
delete(m.subscribers, id)
|
||||
if val, ok := m.subscribers.LoadAndDelete(id); ok {
|
||||
close(val.(chan State))
|
||||
}
|
||||
m.subMutex.Unlock()
|
||||
}
|
||||
|
||||
func (m *Manager) notifySubscribers() {
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
|
||||
"github.com/AvengeMedia/DankMaterialShell/core/internal/errdefs"
|
||||
"github.com/AvengeMedia/DankMaterialShell/core/internal/log"
|
||||
wlclient "github.com/yaslama/go-wayland/wayland/client"
|
||||
wlclient "github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/wayland/client"
|
||||
)
|
||||
|
||||
type SharedContext struct {
|
||||
|
||||
@@ -154,14 +154,14 @@ func (m *Manager) ApplyConfiguration(heads []HeadConfig, test bool) error {
|
||||
statusChan <- fmt.Errorf("configuration cancelled (outdated serial)")
|
||||
})
|
||||
|
||||
m.headsMutex.RLock()
|
||||
headsByName := make(map[string]*headState)
|
||||
for _, head := range m.heads {
|
||||
m.heads.Range(func(key, value interface{}) bool {
|
||||
head := value.(*headState)
|
||||
if !head.finished {
|
||||
headsByName[head.name] = head
|
||||
}
|
||||
}
|
||||
m.headsMutex.RUnlock()
|
||||
return true
|
||||
})
|
||||
|
||||
for _, headCfg := range heads {
|
||||
head, exists := headsByName[headCfg.Name]
|
||||
@@ -188,15 +188,14 @@ func (m *Manager) ApplyConfiguration(heads []HeadConfig, test bool) error {
|
||||
}
|
||||
|
||||
if headCfg.ModeID != nil {
|
||||
m.modesMutex.RLock()
|
||||
mode, exists := m.modes[*headCfg.ModeID]
|
||||
m.modesMutex.RUnlock()
|
||||
val, exists := m.modes.Load(*headCfg.ModeID)
|
||||
|
||||
if !exists {
|
||||
config.Destroy()
|
||||
resultChan <- fmt.Errorf("mode not found: %d", *headCfg.ModeID)
|
||||
return
|
||||
}
|
||||
mode := val.(*modeState)
|
||||
|
||||
if err := headConfig.SetMode(mode.handle); err != nil {
|
||||
config.Destroy()
|
||||
|
||||
@@ -6,20 +6,17 @@ import (
|
||||
|
||||
"github.com/AvengeMedia/DankMaterialShell/core/internal/log"
|
||||
"github.com/AvengeMedia/DankMaterialShell/core/internal/proto/wlr_output_management"
|
||||
wlclient "github.com/yaslama/go-wayland/wayland/client"
|
||||
wlclient "github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/wayland/client"
|
||||
)
|
||||
|
||||
func NewManager(display *wlclient.Display) (*Manager, error) {
|
||||
m := &Manager{
|
||||
display: display,
|
||||
ctx: display.Context(),
|
||||
heads: make(map[uint32]*headState),
|
||||
modes: make(map[uint32]*modeState),
|
||||
cmdq: make(chan cmd, 128),
|
||||
stopChan: make(chan struct{}),
|
||||
subscribers: make(map[string]chan State),
|
||||
dirty: make(chan struct{}, 1),
|
||||
fatalError: make(chan error, 1),
|
||||
display: display,
|
||||
ctx: display.Context(),
|
||||
cmdq: make(chan cmd, 128),
|
||||
stopChan: make(chan struct{}),
|
||||
dirty: make(chan struct{}, 1),
|
||||
fatalError: make(chan error, 1),
|
||||
}
|
||||
|
||||
m.wg.Add(1)
|
||||
@@ -143,9 +140,7 @@ func (m *Manager) handleHead(e wlr_output_management.ZwlrOutputManagerV1HeadEven
|
||||
modeIDs: make([]uint32, 0),
|
||||
}
|
||||
|
||||
m.headsMutex.Lock()
|
||||
m.heads[headID] = head
|
||||
m.headsMutex.Unlock()
|
||||
m.heads.Store(headID, head)
|
||||
|
||||
handle.SetNameHandler(func(e wlr_output_management.ZwlrOutputHeadV1NameEvent) {
|
||||
log.Debugf("WlrOutput: Head %d name: %s", headID, e.Name)
|
||||
@@ -254,9 +249,7 @@ func (m *Manager) handleHead(e wlr_output_management.ZwlrOutputManagerV1HeadEven
|
||||
log.Debugf("WlrOutput: Head %d finished", headID)
|
||||
head.finished = true
|
||||
|
||||
m.headsMutex.Lock()
|
||||
delete(m.heads, headID)
|
||||
m.headsMutex.Unlock()
|
||||
m.heads.Delete(headID)
|
||||
|
||||
m.post(func() {
|
||||
m.wlMutex.Lock()
|
||||
@@ -279,15 +272,13 @@ func (m *Manager) handleMode(headID uint32, e wlr_output_management.ZwlrOutputHe
|
||||
handle: handle,
|
||||
}
|
||||
|
||||
m.modesMutex.Lock()
|
||||
m.modes[modeID] = mode
|
||||
m.modesMutex.Unlock()
|
||||
m.modes.Store(modeID, mode)
|
||||
|
||||
m.headsMutex.Lock()
|
||||
if head, ok := m.heads[headID]; ok {
|
||||
if val, ok := m.heads.Load(headID); ok {
|
||||
head := val.(*headState)
|
||||
head.modeIDs = append(head.modeIDs, modeID)
|
||||
m.heads.Store(headID, head)
|
||||
}
|
||||
m.headsMutex.Unlock()
|
||||
|
||||
handle.SetSizeHandler(func(e wlr_output_management.ZwlrOutputModeV1SizeEvent) {
|
||||
log.Debugf("WlrOutput: Mode %d size: %dx%d", modeID, e.Width, e.Height)
|
||||
@@ -318,9 +309,7 @@ func (m *Manager) handleMode(headID uint32, e wlr_output_management.ZwlrOutputHe
|
||||
log.Debugf("WlrOutput: Mode %d finished", modeID)
|
||||
mode.finished = true
|
||||
|
||||
m.modesMutex.Lock()
|
||||
delete(m.modes, modeID)
|
||||
m.modesMutex.Unlock()
|
||||
m.modes.Delete(modeID)
|
||||
|
||||
m.post(func() {
|
||||
m.wlMutex.Lock()
|
||||
@@ -333,22 +322,24 @@ func (m *Manager) handleMode(headID uint32, e wlr_output_management.ZwlrOutputHe
|
||||
}
|
||||
|
||||
func (m *Manager) updateState() {
|
||||
m.headsMutex.RLock()
|
||||
m.modesMutex.RLock()
|
||||
|
||||
outputs := make([]Output, 0)
|
||||
|
||||
for _, head := range m.heads {
|
||||
m.heads.Range(func(key, value interface{}) bool {
|
||||
head := value.(*headState)
|
||||
if head.finished {
|
||||
continue
|
||||
return true
|
||||
}
|
||||
|
||||
modes := make([]OutputMode, 0)
|
||||
var currentMode *OutputMode
|
||||
|
||||
for _, modeID := range head.modeIDs {
|
||||
mode, exists := m.modes[modeID]
|
||||
if !exists || mode.finished {
|
||||
val, exists := m.modes.Load(modeID)
|
||||
if !exists {
|
||||
continue
|
||||
}
|
||||
mode := val.(*modeState)
|
||||
if mode.finished {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -385,10 +376,8 @@ func (m *Manager) updateState() {
|
||||
ID: head.id,
|
||||
}
|
||||
outputs = append(outputs, output)
|
||||
}
|
||||
|
||||
m.modesMutex.RUnlock()
|
||||
m.headsMutex.RUnlock()
|
||||
return true
|
||||
})
|
||||
|
||||
newState := State{
|
||||
Outputs: outputs,
|
||||
@@ -442,14 +431,6 @@ func (m *Manager) notifier() {
|
||||
if !pending {
|
||||
continue
|
||||
}
|
||||
m.subMutex.RLock()
|
||||
subCount := len(m.subscribers)
|
||||
m.subMutex.RUnlock()
|
||||
|
||||
if subCount == 0 {
|
||||
pending = false
|
||||
continue
|
||||
}
|
||||
|
||||
currentState := m.GetState()
|
||||
|
||||
@@ -458,15 +439,15 @@ func (m *Manager) notifier() {
|
||||
continue
|
||||
}
|
||||
|
||||
m.subMutex.RLock()
|
||||
for _, ch := range m.subscribers {
|
||||
m.subscribers.Range(func(key, value interface{}) bool {
|
||||
ch := value.(chan State)
|
||||
select {
|
||||
case ch <- currentState:
|
||||
default:
|
||||
log.Warn("WlrOutput: subscriber channel full, dropping update")
|
||||
}
|
||||
}
|
||||
m.subMutex.RUnlock()
|
||||
return true
|
||||
})
|
||||
|
||||
stateCopy := currentState
|
||||
m.lastNotified = &stateCopy
|
||||
@@ -480,30 +461,30 @@ func (m *Manager) Close() {
|
||||
m.wg.Wait()
|
||||
m.notifierWg.Wait()
|
||||
|
||||
m.subMutex.Lock()
|
||||
for _, ch := range m.subscribers {
|
||||
m.subscribers.Range(func(key, value interface{}) bool {
|
||||
ch := value.(chan State)
|
||||
close(ch)
|
||||
}
|
||||
m.subscribers = make(map[string]chan State)
|
||||
m.subMutex.Unlock()
|
||||
m.subscribers.Delete(key)
|
||||
return true
|
||||
})
|
||||
|
||||
m.modesMutex.Lock()
|
||||
for _, mode := range m.modes {
|
||||
m.modes.Range(func(key, value interface{}) bool {
|
||||
mode := value.(*modeState)
|
||||
if mode.handle != nil {
|
||||
mode.handle.Release()
|
||||
}
|
||||
}
|
||||
m.modes = make(map[uint32]*modeState)
|
||||
m.modesMutex.Unlock()
|
||||
m.modes.Delete(key)
|
||||
return true
|
||||
})
|
||||
|
||||
m.headsMutex.Lock()
|
||||
for _, head := range m.heads {
|
||||
m.heads.Range(func(key, value interface{}) bool {
|
||||
head := value.(*headState)
|
||||
if head.handle != nil {
|
||||
head.handle.Release()
|
||||
}
|
||||
}
|
||||
m.heads = make(map[uint32]*headState)
|
||||
m.headsMutex.Unlock()
|
||||
m.heads.Delete(key)
|
||||
return true
|
||||
})
|
||||
|
||||
if m.manager != nil {
|
||||
m.manager.Stop()
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"sync"
|
||||
|
||||
"github.com/AvengeMedia/DankMaterialShell/core/internal/proto/wlr_output_management"
|
||||
wlclient "github.com/yaslama/go-wayland/wayland/client"
|
||||
wlclient "github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/wayland/client"
|
||||
)
|
||||
|
||||
type OutputMode struct {
|
||||
@@ -49,11 +49,8 @@ type Manager struct {
|
||||
registry *wlclient.Registry
|
||||
manager *wlr_output_management.ZwlrOutputManagerV1
|
||||
|
||||
headsMutex sync.RWMutex
|
||||
heads map[uint32]*headState
|
||||
|
||||
modesMutex sync.RWMutex
|
||||
modes map[uint32]*modeState
|
||||
heads sync.Map // map[uint32]*headState
|
||||
modes sync.Map // map[uint32]*modeState
|
||||
|
||||
serial uint32
|
||||
|
||||
@@ -62,8 +59,7 @@ type Manager struct {
|
||||
stopChan chan struct{}
|
||||
wg sync.WaitGroup
|
||||
|
||||
subscribers map[string]chan State
|
||||
subMutex sync.RWMutex
|
||||
subscribers sync.Map
|
||||
dirty chan struct{}
|
||||
notifierWg sync.WaitGroup
|
||||
lastNotified *State
|
||||
@@ -120,19 +116,19 @@ func (m *Manager) GetState() State {
|
||||
|
||||
func (m *Manager) Subscribe(id string) chan State {
|
||||
ch := make(chan State, 64)
|
||||
m.subMutex.Lock()
|
||||
m.subscribers[id] = ch
|
||||
m.subMutex.Unlock()
|
||||
|
||||
m.subscribers.Store(id, ch)
|
||||
|
||||
return ch
|
||||
}
|
||||
|
||||
func (m *Manager) Unsubscribe(id string) {
|
||||
m.subMutex.Lock()
|
||||
if ch, ok := m.subscribers[id]; ok {
|
||||
close(ch)
|
||||
delete(m.subscribers, id)
|
||||
|
||||
if val, ok := m.subscribers.LoadAndDelete(id); ok {
|
||||
close(val.(chan State))
|
||||
|
||||
}
|
||||
m.subMutex.Unlock()
|
||||
|
||||
}
|
||||
|
||||
func (m *Manager) notifySubscribers() {
|
||||
|
||||
core/pkg/go-wayland/AUTHORS (3 lines, Normal file)
@@ -0,0 +1,3 @@
// Keep this sorted

rajveermalviya
core/pkg/go-wayland/LICENSE (24 lines, Normal file)
@@ -0,0 +1,24 @@
|
||||
Copyright 2021 go-wayland authors
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
||||
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
core/pkg/go-wayland/README.md (25 lines, Normal file)
@@ -0,0 +1,25 @@
# Wayland implementation in Go

[](https://pkg.go.dev/github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/wayland)

This module contains a pure Go implementation of the Wayland protocol.
Currently only wayland-client functionality is supported.

Go code is generated from protocol XML files using
[`go-wayland-scanner`](cmd/go-wayland-scanner/scanner.go).

To load cursors, a minimal pure-Go port of `wayland-cursor` & `xcursor`
is located at [`wayland/cursor`](wayland/cursor) & [`wayland/cursor/xcursor`](wayland/cursor/xcursor)
respectively.

To demonstrate the functionality of this module,
[`examples/imageviewer`](examples/imageviewer) contains a simple image
viewer. It demonstrates displaying a top-level window, window resizing,
cursor themes, and pointer and keyboard input. Because it's in pure Go, it can be
compiled without CGO. You can try it using the following commands:

```sh
CGO_ENABLED=0 go install github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/examples/imageviewer@latest

imageviewer file.jpg
```
core/pkg/go-wayland/generate (4 lines, Executable file)
@@ -0,0 +1,4 @@
#!/bin/sh

cd ./wayland
go generate -x ./...
core/pkg/go-wayland/generatep (9 lines, Executable file)
@@ -0,0 +1,9 @@
#!/bin/sh

# Runs go generate for each directory, but in parallel. Any arguments are appended to the
# go generate command.
# Usage: $ ./generatep [go generate arguments]
# Print all generate commands: $ ./generatep -x

cd ./wayland
find . -type f -name '*.go' -exec dirname {} \; | sort -u | parallel -j 0 go generate $1 {}/.
core/pkg/go-wayland/wayland/client/client.go (7544 lines, Normal file; diff suppressed because it is too large)
core/pkg/go-wayland/wayland/client/common.go (33 lines, Normal file)
@@ -0,0 +1,33 @@
|
||||
package client
|
||||
|
||||
type Dispatcher interface {
|
||||
Dispatch(opcode uint32, fd int, data []byte)
|
||||
}
|
||||
|
||||
type Proxy interface {
|
||||
Context() *Context
|
||||
SetContext(ctx *Context)
|
||||
ID() uint32
|
||||
SetID(id uint32)
|
||||
}
|
||||
|
||||
type BaseProxy struct {
|
||||
ctx *Context
|
||||
id uint32
|
||||
}
|
||||
|
||||
func (p *BaseProxy) ID() uint32 {
|
||||
return p.id
|
||||
}
|
||||
|
||||
func (p *BaseProxy) SetID(id uint32) {
|
||||
p.id = id
|
||||
}
|
||||
|
||||
func (p *BaseProxy) Context() *Context {
|
||||
return p.ctx
|
||||
}
|
||||
|
||||
func (p *BaseProxy) SetContext(ctx *Context) {
|
||||
p.ctx = ctx
|
||||
}
|
||||
core/pkg/go-wayland/wayland/client/context.go (110 lines, Normal file)
@@ -0,0 +1,110 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type Context struct {
|
||||
conn *net.UnixConn
|
||||
objects sync.Map // map[uint32]Proxy - thread-safe concurrent map
|
||||
currentID uint32
|
||||
idMu sync.Mutex // protects currentID increment
|
||||
}
|
||||
|
||||
func (ctx *Context) Register(p Proxy) {
|
||||
ctx.idMu.Lock()
|
||||
ctx.currentID++
|
||||
id := ctx.currentID
|
||||
ctx.idMu.Unlock()
|
||||
|
||||
p.SetID(id)
|
||||
p.SetContext(ctx)
|
||||
ctx.objects.Store(id, p)
|
||||
}
|
||||
|
||||
func (ctx *Context) Unregister(p Proxy) {
|
||||
ctx.objects.Delete(p.ID())
|
||||
}
|
||||
|
||||
func (ctx *Context) GetProxy(id uint32) Proxy {
|
||||
if val, ok := ctx.objects.Load(id); ok {
|
||||
return val.(Proxy)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ctx *Context) Close() error {
|
||||
return ctx.conn.Close()
|
||||
}
|
||||
|
||||
// Dispatch reads and processes incoming messages and calls [client.Dispatcher.Dispatch] on the
|
||||
// respective wayland protocol.
|
||||
// Dispatch must be called on the same goroutine as other interactions with the Context.
|
||||
// If a multi goroutine approach is desired, use [Context.GetDispatch] instead.
|
||||
// Dispatch blocks if there are no incoming messages.
|
||||
// A Dispatch loop is usually used to handle incoming messages.
|
||||
func (ctx *Context) Dispatch() error {
|
||||
return ctx.GetDispatch()()
|
||||
}
|
||||
|
||||
var ErrDispatchSenderNotFound = errors.New("dispatch: unable to find sender")
|
||||
var ErrDispatchSenderUnsupported = errors.New("dispatch: sender does not implement Dispatch method")
|
||||
var ErrDispatchUnableToReadMsg = errors.New("dispatch: unable to read msg")
|
||||
|
||||
// GetDispatch reads incoming messages and returns the dispatch function which calls
|
||||
// [client.Dispatcher.Dispatch] on the respective wayland protocol.
|
||||
// This function is now thread-safe and can be called from multiple goroutines.
|
||||
// GetDispatch blocks if there are no incoming messages.
|
||||
func (ctx *Context) GetDispatch() func() error {
|
||||
senderID, opcode, fd, data, err := ctx.ReadMsg() // Blocks if there are no incoming messages
|
||||
if err != nil {
|
||||
return func() error {
|
||||
return fmt.Errorf("%w: %w", ErrDispatchUnableToReadMsg, err)
|
||||
}
|
||||
}
|
||||
|
||||
return func() error {
|
||||
val, ok := ctx.objects.Load(senderID)
|
||||
if !ok {
|
||||
return fmt.Errorf("%w (senderID=%d)", ErrDispatchSenderNotFound, senderID)
|
||||
}
|
||||
|
||||
sender, ok := val.(Dispatcher)
|
||||
if !ok {
|
||||
return fmt.Errorf("%w (senderID=%d)", ErrDispatchSenderUnsupported, senderID)
|
||||
}
|
||||
|
||||
sender.Dispatch(opcode, fd, data)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func Connect(addr string) (*Display, error) {
|
||||
if addr == "" {
|
||||
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
|
||||
if runtimeDir == "" {
|
||||
return nil, errors.New("env XDG_RUNTIME_DIR not set")
|
||||
}
|
||||
if addr == "" {
|
||||
addr = os.Getenv("WAYLAND_DISPLAY")
|
||||
}
|
||||
if addr == "" {
|
||||
addr = "wayland-0"
|
||||
}
|
||||
addr = runtimeDir + "/" + addr
|
||||
}
|
||||
|
||||
ctx := &Context{}
|
||||
|
||||
conn, err := net.DialUnix("unix", nil, &net.UnixAddr{Name: addr, Net: "unix"})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ctx.conn = conn
|
||||
|
||||
return NewDisplay(ctx), nil
|
||||
}
|
||||
core/pkg/go-wayland/wayland/client/context_test.go (111 lines, Normal file)
@@ -0,0 +1,111 @@
|
||||
package client_test
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/wayland/client"
|
||||
)
|
||||
|
||||
// Shows a dispatch loop that will block the goroutine.
|
||||
// This approach has no risk of data races but the loop blocks the goroutine when no messages are
|
||||
// received. This can be a valid approach if there are no more changes that need to be made after
|
||||
// setting up and starting the loop.
|
||||
// For a multi goroutine approach, use [client.Context.GetDispatch].
|
||||
func ExampleContext_Dispatch() {
|
||||
display, err := client.Connect("")
|
||||
if err != nil {
|
||||
log.Fatalf("Error connecting to Wayland server: %v", err)
|
||||
}
|
||||
|
||||
registry, err := display.GetRegistry()
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting Wayland registry: %v", err)
|
||||
}
|
||||
|
||||
var seat *client.Seat
|
||||
registry.SetGlobalHandler(func(e client.RegistryGlobalEvent) {
|
||||
switch e.Interface {
|
||||
case client.SeatInterfaceName:
|
||||
seat = client.NewSeat(display.Context())
|
||||
err := registry.Bind(e.Name, e.Interface, e.Version, seat)
|
||||
if err != nil {
|
||||
log.Fatalf("unable to bind %s interface: %v", client.SeatInterfaceName, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
display.Roundtrip()
|
||||
display.Roundtrip()
|
||||
|
||||
keyboard, err := seat.GetKeyboard()
|
||||
if err != nil {
|
||||
log.Printf("Error getting keyboard: %v", err)
|
||||
}
|
||||
log.Printf("Got keyboard: %v\n", keyboard)
|
||||
|
||||
for {
|
||||
err := display.Context().Dispatch()
|
||||
if err != nil {
|
||||
log.Printf("Dispatch error: %v\n", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Shows how the dispatch loop can be done in another goroutine.
|
||||
// This prevents the goroutine from being blocked and allows making changes to wayland objects while
|
||||
// the dispatch loop is blocking another goroutine.
|
||||
func ExampleContext_GetDispatch() {
|
||||
display, err := client.Connect("")
|
||||
if err != nil {
|
||||
log.Fatalf("Error connecting to Wayland server: %v", err)
|
||||
}
|
||||
|
||||
registry, err := display.GetRegistry()
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting Wayland registry: %v", err)
|
||||
}
|
||||
|
||||
var seat *client.Seat
|
||||
registry.SetGlobalHandler(func(e client.RegistryGlobalEvent) {
|
||||
switch e.Interface {
|
||||
case client.SeatInterfaceName:
|
||||
seat = client.NewSeat(display.Context())
|
||||
err := registry.Bind(e.Name, e.Interface, e.Version, seat)
|
||||
if err != nil {
|
||||
log.Fatalf("unable to bind %s interface: %v", client.SeatInterfaceName, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
display.Roundtrip()
|
||||
display.Roundtrip()
|
||||
dispatchQueue := make(chan func() error)
|
||||
|
||||
go func() {
|
||||
for {
|
||||
dispatchQueue <- display.Context().GetDispatch()
|
||||
}
|
||||
}()
|
||||
|
||||
keyboard, err := seat.GetKeyboard()
|
||||
if err != nil {
|
||||
log.Printf("Error getting keyboard: %v", err)
|
||||
}
|
||||
log.Printf("Got keyboard: %v\n", keyboard)
|
||||
|
||||
err = errors.Join(keyboard.Release(), seat.Release(), display.Context().Close())
|
||||
if err != nil {
|
||||
fmt.Printf("Error cleaning up: %v\n", err)
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
// Add other cases here to do other things
|
||||
case dispatchFunc := <-dispatchQueue:
|
||||
err := dispatchFunc()
|
||||
if err != nil {
|
||||
log.Printf("Dispatch error: %v\n", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
core/pkg/go-wayland/wayland/client/display.go (37 lines, Normal file)
@@ -0,0 +1,37 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
)
|
||||
|
||||
// Roundtrip blocks until all pending requests are processed by the server.
|
||||
// It is the implementation of [wl_display_roundtrip].
|
||||
//
|
||||
// [wl_display_roundtrip]: https://wayland.freedesktop.org/docs/html/apb.html#Client-classwl__display_1ab60f38c2f80980ac84f347e932793390
|
||||
func (i *Display) Roundtrip() error {
|
||||
callback, err := i.Sync()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to get sync callback: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err2 := callback.Destroy(); err2 != nil {
|
||||
log.Printf("unable to destroy callback: %v\n", err2)
|
||||
}
|
||||
}()
|
||||
|
||||
done := false
|
||||
callback.SetDoneHandler(func(_ CallbackDoneEvent) {
|
||||
done = true
|
||||
})
|
||||
|
||||
// Wait for callback to return
|
||||
for !done {
|
||||
err := i.Context().GetDispatch()()
|
||||
if err != nil {
|
||||
return fmt.Errorf("roundtrip: failed to dispatch: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
core/pkg/go-wayland/wayland/client/doc.go (6 lines, Normal file)
@@ -0,0 +1,6 @@
// Package client is a Go port of the wayland-client library
// for writing pure Go GUI software for wayland supported
// platforms.
package client

//go:generate go run github.com/yaslama/go-wayland/cmd/go-wayland-scanner -pkg client -prefix wl -o client.go -i https://gitlab.freedesktop.org/wayland/wayland/-/raw/1.23.0/protocol/wayland.xml?ref_type=tags
core/pkg/go-wayland/wayland/client/event.go (120 lines, Normal file)
@@ -0,0 +1,120 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
|
||||
_ "unsafe"
|
||||
)
|
||||
|
||||
var oobSpace = unix.CmsgSpace(4)
|
||||
|
||||
func (ctx *Context) ReadMsg() (senderID uint32, opcode uint32, fd int, msg []byte, err error) {
|
||||
fd = -1
|
||||
|
||||
oob := make([]byte, oobSpace)
|
||||
header := make([]byte, 8)
|
||||
|
||||
n, oobn, _, _, err := ctx.conn.ReadMsgUnix(header, oob)
|
||||
if err != nil {
|
||||
return senderID, opcode, fd, msg, err
|
||||
}
|
||||
if n != 8 {
|
||||
return senderID, opcode, fd, msg, fmt.Errorf("ctx.ReadMsg: incorrect number of bytes read for header (n=%d)", n)
|
||||
}
|
||||
|
||||
if oobn > 0 {
|
||||
fds, err := getFdsFromOob(oob, oobn, "header")
|
||||
if err != nil {
|
||||
return senderID, opcode, fd, msg, fmt.Errorf("ctx.ReadMsg: %w", err)
|
||||
}
|
||||
|
||||
if len(fds) > 0 {
|
||||
fd = fds[0]
|
||||
}
|
||||
}
|
||||
|
||||
senderID = Uint32(header[:4])
|
||||
opcodeAndSize := Uint32(header[4:8])
|
||||
opcode = opcodeAndSize & 0xffff
|
||||
size := opcodeAndSize >> 16
|
||||
|
||||
msgSize := int(size) - 8
|
||||
if msgSize == 0 {
|
||||
return senderID, opcode, fd, nil, nil
|
||||
}
|
||||
|
||||
msg = make([]byte, msgSize)
|
||||
|
||||
if fd == -1 {
|
||||
// if something was read before, then zero it out
|
||||
if oobn > 0 {
|
||||
oob = make([]byte, oobSpace)
|
||||
}
|
||||
|
||||
n, oobn, _, _, err = ctx.conn.ReadMsgUnix(msg, oob)
|
||||
} else {
|
||||
n, err = ctx.conn.Read(msg)
|
||||
}
|
||||
if err != nil {
|
||||
return senderID, opcode, fd, msg, fmt.Errorf("ctx.ReadMsg: %w", err)
|
||||
}
|
||||
if n != msgSize {
|
||||
return senderID, opcode, fd, msg, fmt.Errorf("ctx.ReadMsg: incorrect number of bytes read for msg (n=%d, msgSize=%d)", n, msgSize)
|
||||
}
|
||||
|
||||
if fd == -1 && oobn > 0 {
|
||||
fds, err := getFdsFromOob(oob, oobn, "msg")
|
||||
if err != nil {
|
||||
return senderID, opcode, fd, msg, fmt.Errorf("ctx.ReadMsg: %w", err)
|
||||
}
|
||||
|
||||
if len(fds) > 0 {
|
||||
fd = fds[0]
|
||||
}
|
||||
}
|
||||
|
||||
return senderID, opcode, fd, msg, nil
|
||||
}
|
||||
|
||||
func getFdsFromOob(oob []byte, oobn int, source string) ([]int, error) {
|
||||
if oobn > len(oob) {
|
||||
return nil, fmt.Errorf("getFdsFromOob: incorrect number of bytes read from %s for oob (oobn=%d)", source, oobn)
|
||||
}
|
||||
scms, err := unix.ParseSocketControlMessage(oob)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getFdsFromOob: unable to parse control message from %s: %w", source, err)
|
||||
}
|
||||
|
||||
var fdsRet []int
|
||||
for _, scm := range scms {
|
||||
fds, err := unix.ParseUnixRights(&scm)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getFdsFromOob: unable to parse unix rights from %s: %w", source, err)
|
||||
}
|
||||
|
||||
fdsRet = append(fdsRet, fds...)
|
||||
}
|
||||
|
||||
return fdsRet, nil
|
||||
}
|
||||
|
||||
func Uint32(src []byte) uint32 {
|
||||
_ = src[3]
|
||||
return *(*uint32)(unsafe.Pointer(&src[0]))
|
||||
}
|
||||
|
||||
func String(src []byte) string {
|
||||
idx := bytes.IndexByte(src, 0)
|
||||
src = src[:idx:idx]
|
||||
return *(*string)(unsafe.Pointer(&src))
|
||||
}
|
||||
|
||||
func Fixed(src []byte) float64 {
|
||||
_ = src[3]
|
||||
fx := *(*int32)(unsafe.Pointer(&src[0]))
|
||||
return fixedToFloat64(fx)
|
||||
}
|
||||
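ReadMsg above splits each 8-byte header into the sender object ID and a packed word whose low 16 bits carry the opcode and high 16 bits the total message size (header included). An illustrative standalone decoding, using `encoding/binary` in little-endian order, which matches the native order assumed by the `unsafe` casts on typical x86/ARM hosts:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Hypothetical 8-byte header: sender object ID 42, second word 0x000c0001.
	header := make([]byte, 8)
	binary.LittleEndian.PutUint32(header[0:4], 42)
	binary.LittleEndian.PutUint32(header[4:8], 12<<16|1)

	senderID := binary.LittleEndian.Uint32(header[0:4])
	opcodeAndSize := binary.LittleEndian.Uint32(header[4:8])
	opcode := opcodeAndSize & 0xffff
	size := opcodeAndSize >> 16

	// size counts the header itself, so the body that follows is size-8 bytes.
	fmt.Println(senderID, opcode, size, size-8) // 42 1 12 4
}
```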
core/pkg/go-wayland/wayland/client/request.go (44 lines, Normal file)
@@ -0,0 +1,44 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func (ctx *Context) WriteMsg(b []byte, oob []byte) error {
|
||||
n, oobn, err := ctx.conn.WriteMsgUnix(b, oob, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n != len(b) || oobn != len(oob) {
|
||||
return fmt.Errorf("ctx.WriteMsg: incorrect number of bytes written (n=%d oobn=%d)", n, oobn)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func PutUint32(dst []byte, v uint32) {
|
||||
_ = dst[3]
|
||||
*(*uint32)(unsafe.Pointer(&dst[0])) = v
|
||||
}
|
||||
|
||||
func PutFixed(dst []byte, f float64) {
|
||||
fx := fixedFromfloat64(f)
|
||||
_ = dst[3]
|
||||
*(*int32)(unsafe.Pointer(&dst[0])) = fx
|
||||
}
|
||||
|
||||
// PutString places a string in Wayland's wire format on the destination buffer.
|
||||
// It first places the length of the string (plus one for the null terminator) and then the string
|
||||
// followed by a null byte.
|
||||
// The length of dst must be equal to, or greater than, len(v) + 5.
|
||||
func PutString(dst []byte, v string) {
|
||||
PutUint32(dst[:4], uint32(len(v)+1))
|
||||
copy(dst[4:], v)
|
||||
dst[4+len(v)] = '\x00' // To cause panic if dst is not large enough
|
||||
}
|
||||
|
||||
func PutArray(dst []byte, a []byte) {
|
||||
PutUint32(dst[:4], uint32(len(a)))
|
||||
copy(dst[4:], a)
|
||||
}
|
||||
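PutString writes a 32-bit length word (which includes the null terminator), then the bytes and a trailing null, and argument buffers are rounded up to 32-bit boundaries with `PaddedLen` from util.go below. A hedged sketch of composing these exported helpers; `buildPayload` is a hypothetical illustration, not part of the package:

```go
package main

import (
	"fmt"

	wlclient "github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/wayland/client"
)

// buildPayload shows how the exported marshalling primitives compose:
// one uint32 argument followed by one string argument.
func buildPayload(id uint32, name string) []byte {
	strBody := wlclient.PaddedLen(len(name) + 1) // string bytes + null terminator, padded to 32 bits
	buf := make([]byte, 4+4+strBody)             // uint32 arg + string length word + padded string body
	wlclient.PutUint32(buf[0:4], id)
	wlclient.PutString(buf[4:], name)
	return buf
}

func main() {
	fmt.Println(len(buildPayload(7, "out"))) // 4 + 4 + PaddedLen(4) = 12
}
```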
core/pkg/go-wayland/wayland/client/util.go (24 lines, Normal file)
@@ -0,0 +1,24 @@
|
||||
package client
|
||||
|
||||
import "math"
|
||||
|
||||
// From wayland/wayland-util.h
|
||||
|
||||
func fixedToFloat64(f int32) float64 {
|
||||
u_i := (1023+44)<<52 + (1 << 51) + int64(f)
|
||||
u_d := math.Float64frombits(uint64(u_i))
|
||||
return u_d - (3 << 43)
|
||||
}
|
||||
|
||||
func fixedFromfloat64(d float64) int32 {
|
||||
u_d := d + (3 << (51 - 8))
|
||||
u_i := int64(math.Float64bits(u_d))
|
||||
return int32(u_i)
|
||||
}
|
||||
|
||||
func PaddedLen(l int) int {
|
||||
if (l & 0x3) != 0 {
|
||||
return l + (4 - (l & 0x3))
|
||||
}
|
||||
return l
|
||||
}
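These helpers convert between `float64` and Wayland's signed 24.8 fixed-point representation, so 1.0 encodes as 256 and the resolution is 1/256, while `PaddedLen` rounds byte lengths up to the next 32-bit boundary. A hypothetical in-package test sketch of those properties (the file and test names are not part of the commit):

```go
package client

import "testing"

// TestFixedAndPadding is an illustrative sketch: wl_fixed is a signed 24.8
// fixed-point value, and wire lengths are padded to 32-bit boundaries.
func TestFixedAndPadding(t *testing.T) {
	if got := fixedFromfloat64(1.0); got != 256 {
		t.Fatalf("fixedFromfloat64(1.0) = %d, want 256", got)
	}
	if got := fixedToFloat64(-128); got != -0.5 {
		t.Fatalf("fixedToFloat64(-128) = %v, want -0.5", got)
	}
	if got := PaddedLen(5); got != 8 {
		t.Fatalf("PaddedLen(5) = %d, want 8", got)
	}
}
```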