1
0
mirror of https://github.com/AvengeMedia/DankMaterialShell.git synced 2026-05-13 07:42:46 -04:00

Compare commits

...

51 Commits

Author SHA1 Message Date
purian23 5b8edb13d8 distro: OBS updates 2025-12-12 15:42:21 -05:00
bbedward c595727b94 osd: optimize surface damage
fixes #994
2025-12-12 15:37:39 -05:00
bbedward d46302588a clipboard: add shift+enter to paste from clipboard history modal
fixes #358
2025-12-12 15:29:10 -05:00
bbedward 0ff9fdb365 notifications: add swipe to dismiss functionality
fixes #927
2025-12-12 14:39:51 -05:00
purian23 e95f7ce367 Update Copr specs 2025-12-12 12:30:18 -05:00
Pi Home Server df1a8f4066 Add lock screen layout settings (#981)
* Add lock screen layout settings

* Update translation keys
2025-12-12 11:45:00 -05:00
bbedward 32e6c1660f wallpaper: clamp max texture size 2025-12-12 11:17:28 -05:00
bbedward d6b9b72e9b ci: disable pkg builds from main release wf 2025-12-12 10:16:24 -05:00
bbedward 179ad03fa4 ci: switch to dispatch-based release flow 2025-12-12 10:01:44 -05:00
bbedward c3cb82c84e dankinstall: call add-wants for niri/hyprland with dms service 2025-12-12 09:58:12 -05:00
bbedward 4b52e2ed9e niri: fix keybind handling of cooldown-ms parameter 2025-12-12 09:52:35 -05:00
bbedward 77fd61f81e workspaces: make icons scale with bar size, fix valign of numbers
fixes #990
2025-12-12 00:23:40 -05:00
Lucas c3ffb7f83b nix: remove wl-clipboard and cliphist dependencies (#991) 2025-12-11 21:44:36 -05:00
Lucas 89dcd72d70 nix: let paths be used instead of only packages in plugins (#988) 2025-12-11 23:57:22 +01:00
bbedward 5c3346aa9d core: fix test 2025-12-11 16:33:31 -05:00
bbedward 7c4b383477 clipboard: persistence off by default
- It's a little risky and messy of a default
2025-12-11 16:28:56 -05:00
bbedward bdc0e8e0fc clipboard: dont take ownership on nil offers 2025-12-11 15:55:42 -05:00
bbedward 6d66f93565 core: mock wayland context for tests & add i18n guidance to CONTRIBUTING 2025-12-11 14:50:02 -05:00
Lucas 9cac93b724 nix: fix pre-commit hook in dev-shell (#987) 2025-12-11 14:40:19 -05:00
bbedward 0709f263af core: add test coverage for some of the wayland stack
- mostly targeting any race issue detection
2025-12-11 13:47:18 -05:00
Lucas 4e4effd8b1 nix: fix home-manager module plugins (#984) 2025-12-11 19:36:32 +01:00
bbedward f9632cba61 core: remove hyprpicker remnant 2025-12-11 13:05:07 -05:00
bbedward 38db6a41d5 gamma: fix initial night mode enablement 2025-12-11 12:27:58 -05:00
bbedward 7c6f0432c8 clipboard: add copyEntry (by id) handler 2025-12-11 12:00:47 -05:00
bbedward 56ff9368be matugen: add option to disable DMS templates
fixes #983
2025-12-11 11:48:59 -05:00
bbedward 597e21d44d clipboard: remove wl-copy references 2025-12-11 11:10:27 -05:00
bbedward 5bf54632be media: add option to disable visualizer in bar widget
fixes #978
2025-12-11 10:55:32 -05:00
bbedward 3a8d3ee515 core: use stdlib for xdg dirs 2025-12-11 10:15:23 -05:00
bbedward 1c1cf866e2 settings: make default height screen-aware 2025-12-11 09:51:44 -05:00
bbedward ccc1df75f1 nix: update vendorHash 2025-12-11 09:50:50 -05:00
bbedward d2c3f87656 ci: fix nix vendor-hash workflow 2025-12-11 09:46:57 -05:00
bbedward 6d62229b5f clipboard: introduce native clipboard, clip-persist, clip-storage functionality 2025-12-11 09:41:07 -05:00
Marcus Ramberg 7c88865d67 Refactor pre-commit hooks to use prek (#976)
* ci: change to prek for pre-commit

* refactor: fix shellcheck warnings for the scripts

* chore: unify whitespace formatting

* nix: add prek to dev shell
2025-12-11 09:11:12 -05:00
bbedward c8cfe0cb5a dwl: fix layout popout not opening
fixes #980
2025-12-11 09:05:53 -05:00
Lucas e573bdba92 nix: add QML dependencies to dms-shell package (#967) 2025-12-11 09:19:43 +01:00
Lucas d8cd15d361 nix: add plugins in NixOS module (#970)
* nix: remove unnecessary /etc/xdg/quickshell/dms and .config/quickshell/dms

* nix: add plugins in NixOS module
2025-12-11 09:03:22 +01:00
Lucas 1db3907838 nix: fix greeter per-monitor and per-mode wallpapers (#974) 2025-12-11 09:01:14 +01:00
Lucas 72cfd37ab7 nix: fix niri module (#969) 2025-12-10 23:21:52 -05:00
bbedward 1e67ee995e plugins: hide uninstall and update buttons for system plugins 2025-12-10 19:30:58 -05:00
bbedward 6c26b4080c core: fix socket reported CLI version 2025-12-10 16:48:44 -05:00
purian23 0dbd59b223 Manual Changelog versioning 2025-12-10 13:52:29 -05:00
Lucas b2066c60d1 nix: drop unnecessary dependencies and enable power and accounts daemons (#963) 2025-12-10 19:35:58 +01:00
bbedward 8d7ae324ff Revert "distro: update ppa-build script to ref right version"
This reverts commit c0d3c4f875.
2025-12-10 13:31:15 -05:00
bbedward c0d3c4f875 distro: update ppa-build script to ref right version 2025-12-10 13:28:34 -05:00
purian23 27a771648a Ubuntu workflow tweak 2025-12-10 12:37:56 -05:00
purian23 86affc7304 Add WorkDIR to build steps 2025-12-10 12:33:41 -05:00
purian23 d939b99628 Workflow build increment logic 2025-12-10 12:27:10 -05:00
purian23 1fcf777f3d Bump OBS spec 2025-12-10 12:13:43 -05:00
purian23 7a8e23faa9 Update build scripts 2025-12-10 12:06:28 -05:00
bbedward 73a4dd3321 change codename 2025-12-10 11:18:08 -05:00
purian23 13ce873a69 Update dms stable systemd & desktop path 2025-12-10 11:16:03 -05:00
272 changed files with 11033 additions and 2706 deletions
+8
View File
@@ -0,0 +1,8 @@
[*.sh]
# 4-space indentation for shell scripts (matches shfmt -i=4)
indent_style = space
indent_size = 4
[*.nix]
# 4-space indentation for Nix files (matches nixfmt/alejandra defaults of 4 when configured)
indent_style = space
indent_size = 4
-69
View File
@@ -1,69 +0,0 @@
#!/usr/bin/env bash
# Git pre-commit hook: mirrors the repository's Go CI checks locally
# (gofmt, golangci-lint, go test, go build) so failures are caught before
# pushing. Only runs the Go checks when files under core/ are staged.
set -euo pipefail
# Resolve the repository root relative to this hook's own location so the
# hook behaves the same no matter which directory the commit is run from.
HOOK_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "$HOOK_DIR/.." && pwd)"
cd "$REPO_ROOT"
# =============================================================================
# Go CI checks (when core/ files are staged)
# =============================================================================
# ACMR = added/copied/modified/renamed; '|| true' keeps set -e from aborting
# when grep matches nothing (no core/ files staged).
STAGED_CORE_FILES=$(git diff --cached --name-only --diff-filter=ACMR | grep '^core/' || true)
if [[ -n "$STAGED_CORE_FILES" ]]; then
echo "Go files staged in core/, running CI checks..."
cd "$REPO_ROOT/core"
# Format check: gofmt -l lists unformatted files; empty output means clean.
echo " Checking gofmt..."
UNFORMATTED=$(gofmt -s -l . 2>/dev/null || true)
if [[ -n "$UNFORMATTED" ]]; then
echo "The following files are not formatted:"
echo "$UNFORMATTED"
echo ""
echo "Run: cd core && gofmt -s -w ."
exit 1
fi
# golangci-lint is optional: warn and skip instead of failing when missing.
if command -v golangci-lint &>/dev/null; then
echo " Running golangci-lint..."
golangci-lint run ./...
else
echo " Warning: golangci-lint not installed, skipping lint"
echo " Install: go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest"
fi
# Tests: output is intentionally suppressed here; rerun manually for details.
echo " Running tests..."
if ! go test ./... >/dev/null 2>&1; then
echo "Tests failed! Run 'go test ./...' for details."
exit 1
fi
# Build checks: verify all three binaries (including the distro-tagged
# variant) still compile.
echo " Building..."
mkdir -p bin
go build -buildvcs=false -o bin/dms ./cmd/dms
go build -buildvcs=false -o bin/dms-distro -tags distro_binary ./cmd/dms
go build -buildvcs=false -o bin/dankinstall ./cmd/dankinstall
echo "All Go CI checks passed!"
cd "$REPO_ROOT"
fi
# =============================================================================
# i18n sync check (DISABLED for now)
# =============================================================================
# if [[ -n "${POEDITOR_API_TOKEN:-}" ]] && [[ -n "${POEDITOR_PROJECT_ID:-}" ]]; then
# if command -v python3 &>/dev/null; then
# if ! python3 scripts/i18nsync.py check &>/dev/null; then
# echo "Translations out of sync"
# echo "Run: python3 scripts/i18nsync.py sync"
# exit 1
# fi
# fi
# fi
exit 0
-14
View File
@@ -33,20 +33,6 @@ jobs:
with: with:
go-version-file: ./core/go.mod go-version-file: ./core/go.mod
- name: Format check
run: |
if [ "$(gofmt -s -l . | wc -l)" -gt 0 ]; then
echo "The following files are not formatted:"
gofmt -s -l .
exit 1
fi
- name: Run golangci-lint
uses: golangci/golangci-lint-action@v9
with:
version: v2.6
working-directory: core
- name: Test - name: Test
run: go test -v ./... run: go test -v ./...
+15
View File
@@ -0,0 +1,15 @@
# CI workflow: run the repository's pre-commit hooks (via prek) on every
# push, and on pull requests targeting master/main.
name: Pre-commit Checks
on:
  push:
  pull_request:
    branches: [master, main]
jobs:
  pre-commit-check:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: run pre-commit hooks
        uses: j178/prek-action@v1
+261 -242
View File
@@ -1,16 +1,19 @@
name: Release name: Release
on: on:
push: workflow_dispatch:
tags: inputs:
- "v*" tag:
description: "Tag to release (e.g., v1.0.1)"
required: true
type: string
permissions: permissions:
contents: write contents: write
actions: write actions: write
concurrency: concurrency:
group: release-${{ github.ref_name }} group: release-${{ inputs.tag }}
cancel-in-progress: true cancel-in-progress: true
jobs: jobs:
@@ -24,10 +27,14 @@ jobs:
run: run:
working-directory: core working-directory: core
env:
TAG: ${{ inputs.tag }}
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
ref: ${{ inputs.tag }}
fetch-depth: 0 fetch-depth: 0
- name: Set up Go - name: Set up Go
@@ -54,7 +61,7 @@ jobs:
run: | run: |
set -eux set -eux
cd cmd/dankinstall cd cmd/dankinstall
go build -trimpath -ldflags "-s -w -X main.Version=${GITHUB_REF#refs/tags/}" \ go build -trimpath -ldflags "-s -w -X main.Version=${TAG}" \
-o ../../dankinstall-${{ matrix.arch }} -o ../../dankinstall-${{ matrix.arch }}
cd ../.. cd ../..
gzip -9 -k dankinstall-${{ matrix.arch }} gzip -9 -k dankinstall-${{ matrix.arch }}
@@ -68,7 +75,7 @@ jobs:
run: | run: |
set -eux set -eux
cd cmd/dms cd cmd/dms
go build -trimpath -ldflags "-s -w -X main.Version=${GITHUB_REF#refs/tags/}" \ go build -trimpath -ldflags "-s -w -X main.Version=${TAG}" \
-o ../../dms-${{ matrix.arch }} -o ../../dms-${{ matrix.arch }}
cd ../.. cd ../..
gzip -9 -k dms-${{ matrix.arch }} gzip -9 -k dms-${{ matrix.arch }}
@@ -91,7 +98,7 @@ jobs:
run: | run: |
set -eux set -eux
cd cmd/dms cd cmd/dms
go build -trimpath -tags distro_binary -ldflags "-s -w -X main.Version=${GITHUB_REF#refs/tags/}" \ go build -trimpath -tags distro_binary -ldflags "-s -w -X main.Version=${TAG}" \
-o ../../dms-distropkg-${{ matrix.arch }} -o ../../dms-distropkg-${{ matrix.arch }}
cd ../.. cd ../..
gzip -9 -k dms-distropkg-${{ matrix.arch }} gzip -9 -k dms-distropkg-${{ matrix.arch }}
@@ -171,17 +178,18 @@ jobs:
runs-on: ubuntu-24.04 runs-on: ubuntu-24.04
needs: [build-core] #, update-versions] needs: [build-core] #, update-versions]
env: env:
TAG: ${{ github.ref_name }} TAG: ${{ inputs.tag }}
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
ref: ${{ inputs.tag }}
fetch-depth: 0 fetch-depth: 0
- name: Fetch updated tag after version bump - name: Fetch updated tag after version bump
run: | run: |
git fetch origin --force tag ${{ github.ref_name }} git fetch origin --force tag ${TAG}
git checkout ${{ github.ref_name }} git checkout ${TAG}
- name: Download core artifacts - name: Download core artifacts
uses: actions/download-artifact@v4 uses: actions/download-artifact@v4
@@ -273,6 +281,9 @@ jobs:
# Copy root LICENSE and CONTRIBUTING.md to quickshell/ for packaging # Copy root LICENSE and CONTRIBUTING.md to quickshell/ for packaging
cp LICENSE CONTRIBUTING.md quickshell/ cp LICENSE CONTRIBUTING.md quickshell/
# Copy root assets directory to quickshell for systemd service and desktop file
cp -r assets quickshell/
# Tar the CONTENTS of quickshell/, not the directory itself # Tar the CONTENTS of quickshell/, not the directory itself
(cd quickshell && tar --exclude='.git' \ (cd quickshell && tar --exclude='.git' \
--exclude='.github' \ --exclude='.github' \
@@ -388,288 +399,296 @@ jobs:
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
trigger-obs-update: # trigger-obs-update:
runs-on: ubuntu-latest # runs-on: ubuntu-latest
needs: release # needs: release
steps: # env:
- name: Checkout # TAG: ${{ inputs.tag }}
uses: actions/checkout@v4 # steps:
# - name: Checkout
# uses: actions/checkout@v4
# with:
# ref: ${{ inputs.tag }}
- name: Install OSC # - name: Install OSC
run: | # run: |
sudo apt-get update # sudo apt-get update
sudo apt-get install -y osc # sudo apt-get install -y osc
mkdir -p ~/.config/osc # mkdir -p ~/.config/osc
cat > ~/.config/osc/oscrc << EOF # cat > ~/.config/osc/oscrc << EOF
[general] # [general]
apiurl = https://api.opensuse.org # apiurl = https://api.opensuse.org
[https://api.opensuse.org] # [https://api.opensuse.org]
user = ${{ secrets.OBS_USERNAME }} # user = ${{ secrets.OBS_USERNAME }}
pass = ${{ secrets.OBS_PASSWORD }} # pass = ${{ secrets.OBS_PASSWORD }}
EOF # EOF
chmod 600 ~/.config/osc/oscrc # chmod 600 ~/.config/osc/oscrc
- name: Update OBS packages # - name: Update OBS packages
run: | # run: |
VERSION="${{ github.ref_name }}" # cd distro
cd distro # bash scripts/obs-upload.sh dms "Update to ${TAG}"
bash scripts/obs-upload.sh dms "Update to $VERSION"
trigger-ppa-update: # trigger-ppa-update:
runs-on: ubuntu-latest # runs-on: ubuntu-latest
needs: release # needs: release
steps: # env:
- name: Checkout # TAG: ${{ inputs.tag }}
uses: actions/checkout@v4 # steps:
# - name: Checkout
# uses: actions/checkout@v4
# with:
# ref: ${{ inputs.tag }}
- name: Install build dependencies # - name: Install build dependencies
run: | # run: |
sudo apt-get update # sudo apt-get update
sudo apt-get install -y \ # sudo apt-get install -y \
debhelper \ # debhelper \
devscripts \ # devscripts \
dput \ # dput \
lftp \ # lftp \
build-essential \ # build-essential \
fakeroot \ # fakeroot \
dpkg-dev # dpkg-dev
- name: Configure GPG # - name: Configure GPG
env: # env:
GPG_KEY: ${{ secrets.GPG_PRIVATE_KEY }} # GPG_KEY: ${{ secrets.GPG_PRIVATE_KEY }}
run: | # run: |
echo "$GPG_KEY" | gpg --import # echo "$GPG_KEY" | gpg --import
GPG_KEY_ID=$(gpg --list-secret-keys --keyid-format LONG | grep sec | awk '{print $2}' | cut -d'/' -f2) # GPG_KEY_ID=$(gpg --list-secret-keys --keyid-format LONG | grep sec | awk '{print $2}' | cut -d'/' -f2)
echo "DEBSIGN_KEYID=$GPG_KEY_ID" >> $GITHUB_ENV # echo "DEBSIGN_KEYID=$GPG_KEY_ID" >> $GITHUB_ENV
- name: Upload to PPA # - name: Upload to PPA
run: | # run: |
VERSION="${{ github.ref_name }}" # cd distro/ubuntu/ppa
cd distro/ubuntu/ppa # bash create-and-upload.sh ../dms dms questing
bash create-and-upload.sh ../dms dms questing
copr-build: # copr-build:
runs-on: ubuntu-latest # runs-on: ubuntu-latest
needs: release # needs: release
env: # env:
TAG: ${{ github.ref_name }} # TAG: ${{ inputs.tag }}
steps: # steps:
- name: Checkout repository # - name: Checkout repository
uses: actions/checkout@v4 # uses: actions/checkout@v4
# with:
# ref: ${{ inputs.tag }}
- name: Determine version # - name: Determine version
id: version # id: version
run: | # run: |
VERSION="${TAG#v}" # VERSION="${TAG#v}"
echo "version=$VERSION" >> $GITHUB_OUTPUT # echo "version=$VERSION" >> $GITHUB_OUTPUT
echo "Building DMS stable version: $VERSION" # echo "Building DMS stable version: $VERSION"
- name: Setup build environment # - name: Setup build environment
run: | # run: |
sudo apt-get update # sudo apt-get update
sudo apt-get install -y rpm wget curl jq gzip # sudo apt-get install -y rpm wget curl jq gzip
mkdir -p ~/rpmbuild/{BUILD,BUILDROOT,RPMS,SOURCES,SPECS,SRPMS} # mkdir -p ~/rpmbuild/{BUILD,BUILDROOT,RPMS,SOURCES,SPECS,SRPMS}
- name: Download release assets # - name: Download release assets
run: | # run: |
VERSION="${{ steps.version.outputs.version }}" # VERSION="${{ steps.version.outputs.version }}"
cd ~/rpmbuild/SOURCES # cd ~/rpmbuild/SOURCES
wget "https://github.com/AvengeMedia/DankMaterialShell/releases/download/v${VERSION}/dms-qml.tar.gz" || { # wget "https://github.com/AvengeMedia/DankMaterialShell/releases/download/v${VERSION}/dms-qml.tar.gz" || {
echo "Failed to download dms-qml.tar.gz for v${VERSION}" # echo "Failed to download dms-qml.tar.gz for v${VERSION}"
exit 1 # exit 1
} # }
- name: Generate stable spec file # - name: Generate stable spec file
run: | # run: |
VERSION="${{ steps.version.outputs.version }}" # VERSION="${{ steps.version.outputs.version }}"
CHANGELOG_DATE="$(date '+%a %b %d %Y')" # CHANGELOG_DATE="$(date '+%a %b %d %Y')"
cat > ~/rpmbuild/SPECS/dms.spec <<'SPECEOF' # cat > ~/rpmbuild/SPECS/dms.spec <<'SPECEOF'
# Spec for DMS stable releases - Generated by GitHub Actions # # Spec for DMS stable releases - Generated by GitHub Actions
%global debug_package %{nil} # %global debug_package %{nil}
%global version VERSION_PLACEHOLDER # %global version VERSION_PLACEHOLDER
%global pkg_summary DankMaterialShell - Material 3 inspired shell for Wayland compositors # %global pkg_summary DankMaterialShell - Material 3 inspired shell for Wayland compositors
Name: dms # Name: dms
Version: %{version} # Version: %{version}
Release: 1%{?dist} # Release: 1%{?dist}
Summary: %{pkg_summary} # Summary: %{pkg_summary}
License: MIT # License: MIT
URL: https://github.com/AvengeMedia/DankMaterialShell # URL: https://github.com/AvengeMedia/DankMaterialShell
Source0: dms-qml.tar.gz # Source0: dms-qml.tar.gz
BuildRequires: gzip # BuildRequires: gzip
BuildRequires: wget # BuildRequires: wget
BuildRequires: systemd-rpm-macros # BuildRequires: systemd-rpm-macros
Requires: (quickshell or quickshell-git) # Requires: (quickshell or quickshell-git)
Requires: accountsservice # Requires: accountsservice
Requires: dms-cli = %{version}-%{release} # Requires: dms-cli = %{version}-%{release}
Requires: dgop # Requires: dgop
Recommends: cava # Recommends: cava
Recommends: cliphist # Recommends: cliphist
Recommends: danksearch # Recommends: danksearch
Recommends: matugen # Recommends: matugen
Recommends: wl-clipboard # Recommends: wl-clipboard
Recommends: NetworkManager # Recommends: NetworkManager
Recommends: qt6-qtmultimedia # Recommends: qt6-qtmultimedia
Suggests: qt6ct # Suggests: qt6ct
%description # %description
DankMaterialShell (DMS) is a modern Wayland desktop shell built with Quickshell # DankMaterialShell (DMS) is a modern Wayland desktop shell built with Quickshell
and optimized for the niri and hyprland compositors. Features notifications, # and optimized for the niri and hyprland compositors. Features notifications,
app launcher, wallpaper customization, and fully customizable with plugins. # app launcher, wallpaper customization, and fully customizable with plugins.
Includes auto-theming for GTK/Qt apps with matugen, 20+ customizable widgets, # Includes auto-theming for GTK/Qt apps with matugen, 20+ customizable widgets,
process monitoring, notification center, clipboard history, dock, control center, # process monitoring, notification center, clipboard history, dock, control center,
lock screen, and comprehensive plugin system. # lock screen, and comprehensive plugin system.
%package -n dms-cli # %package -n dms-cli
Summary: DankMaterialShell CLI tool # Summary: DankMaterialShell CLI tool
License: MIT # License: MIT
URL: https://github.com/AvengeMedia/DankMaterialShell # URL: https://github.com/AvengeMedia/DankMaterialShell
%description -n dms-cli # %description -n dms-cli
Command-line interface for DankMaterialShell configuration and management. # Command-line interface for DankMaterialShell configuration and management.
Provides native DBus bindings, NetworkManager integration, and system utilities. # Provides native DBus bindings, NetworkManager integration, and system utilities.
%prep # %prep
%setup -q -c -n dms-qml # %setup -q -c -n dms-qml
# Download architecture-specific binaries during build # # Download architecture-specific binaries during build
case "%{_arch}" in # case "%{_arch}" in
x86_64) # x86_64)
ARCH_SUFFIX="amd64" # ARCH_SUFFIX="amd64"
;; # ;;
aarch64) # aarch64)
ARCH_SUFFIX="arm64" # ARCH_SUFFIX="arm64"
;; # ;;
*) # *)
echo "Unsupported architecture: %{_arch}" # echo "Unsupported architecture: %{_arch}"
exit 1 # exit 1
;; # ;;
esac # esac
wget -O %{_builddir}/dms-cli.gz "https://github.com/AvengeMedia/DankMaterialShell/releases/latest/download/dms-distropkg-${ARCH_SUFFIX}.gz" || { # wget -O %{_builddir}/dms-cli.gz "https://github.com/AvengeMedia/DankMaterialShell/releases/latest/download/dms-distropkg-${ARCH_SUFFIX}.gz" || {
echo "Failed to download dms-cli for architecture %{_arch}" # echo "Failed to download dms-cli for architecture %{_arch}"
exit 1 # exit 1
} # }
gunzip -c %{_builddir}/dms-cli.gz > %{_builddir}/dms-cli # gunzip -c %{_builddir}/dms-cli.gz > %{_builddir}/dms-cli
chmod +x %{_builddir}/dms-cli # chmod +x %{_builddir}/dms-cli
%build # %build
%install # %install
install -Dm755 %{_builddir}/dms-cli %{buildroot}%{_bindir}/dms # install -Dm755 %{_builddir}/dms-cli %{buildroot}%{_bindir}/dms
install -d %{buildroot}%{_datadir}/bash-completion/completions # install -d %{buildroot}%{_datadir}/bash-completion/completions
install -d %{buildroot}%{_datadir}/zsh/site-functions # install -d %{buildroot}%{_datadir}/zsh/site-functions
install -d %{buildroot}%{_datadir}/fish/vendor_completions.d # install -d %{buildroot}%{_datadir}/fish/vendor_completions.d
%{_builddir}/dms-cli completion bash > %{buildroot}%{_datadir}/bash-completion/completions/dms || : # %{_builddir}/dms-cli completion bash > %{buildroot}%{_datadir}/bash-completion/completions/dms || :
%{_builddir}/dms-cli completion zsh > %{buildroot}%{_datadir}/zsh/site-functions/_dms || : # %{_builddir}/dms-cli completion zsh > %{buildroot}%{_datadir}/zsh/site-functions/_dms || :
%{_builddir}/dms-cli completion fish > %{buildroot}%{_datadir}/fish/vendor_completions.d/dms.fish || : # %{_builddir}/dms-cli completion fish > %{buildroot}%{_datadir}/fish/vendor_completions.d/dms.fish || :
install -Dm644 assets/systemd/dms.service %{buildroot}%{_userunitdir}/dms.service # install -Dm644 assets/systemd/dms.service %{buildroot}%{_userunitdir}/dms.service
install -Dm644 assets/dms-open.desktop %{buildroot}%{_datadir}/applications/dms-open.desktop # install -Dm644 assets/dms-open.desktop %{buildroot}%{_datadir}/applications/dms-open.desktop
install -Dm644 assets/danklogo.svg %{buildroot}%{_datadir}/icons/hicolor/scalable/apps/danklogo.svg # install -Dm644 assets/danklogo.svg %{buildroot}%{_datadir}/icons/hicolor/scalable/apps/danklogo.svg
install -dm755 %{buildroot}%{_datadir}/quickshell/dms # install -dm755 %{buildroot}%{_datadir}/quickshell/dms
cp -r %{_builddir}/dms-qml/* %{buildroot}%{_datadir}/quickshell/dms/ # cp -r %{_builddir}/dms-qml/* %{buildroot}%{_datadir}/quickshell/dms/
rm -rf %{buildroot}%{_datadir}/quickshell/dms/.git* # rm -rf %{buildroot}%{_datadir}/quickshell/dms/.git*
rm -f %{buildroot}%{_datadir}/quickshell/dms/.gitignore # rm -f %{buildroot}%{_datadir}/quickshell/dms/.gitignore
rm -rf %{buildroot}%{_datadir}/quickshell/dms/.github # rm -rf %{buildroot}%{_datadir}/quickshell/dms/.github
rm -rf %{buildroot}%{_datadir}/quickshell/dms/distro # rm -rf %{buildroot}%{_datadir}/quickshell/dms/distro
echo "%{version}" > %{buildroot}%{_datadir}/quickshell/dms/VERSION # echo "%{version}" > %{buildroot}%{_datadir}/quickshell/dms/VERSION
%posttrans # %posttrans
if [ -d "%{_sysconfdir}/xdg/quickshell/dms" ]; then # if [ -d "%{_sysconfdir}/xdg/quickshell/dms" ]; then
rmdir "%{_sysconfdir}/xdg/quickshell/dms" 2>/dev/null || true # rmdir "%{_sysconfdir}/xdg/quickshell/dms" 2>/dev/null || true
rmdir "%{_sysconfdir}/xdg/quickshell" 2>/dev/null || true # rmdir "%{_sysconfdir}/xdg/quickshell" 2>/dev/null || true
rmdir "%{_sysconfdir}/xdg" 2>/dev/null || true # rmdir "%{_sysconfdir}/xdg" 2>/dev/null || true
fi # fi
# Signal running DMS instances to reload # # Signal running DMS instances to reload
pkill -USR1 -x dms >/dev/null 2>&1 || : # pkill -USR1 -x dms >/dev/null 2>&1 || :
%files # %files
%license LICENSE # %license LICENSE
%doc README.md CONTRIBUTING.md # %doc README.md CONTRIBUTING.md
%{_datadir}/quickshell/dms/ # %{_datadir}/quickshell/dms/
%{_userunitdir}/dms.service # %{_userunitdir}/dms.service
%{_datadir}/applications/dms-open.desktop # %{_datadir}/applications/dms-open.desktop
%{_datadir}/icons/hicolor/scalable/apps/danklogo.svg # %{_datadir}/icons/hicolor/scalable/apps/danklogo.svg
%files -n dms-cli # %files -n dms-cli
%{_bindir}/dms # %{_bindir}/dms
%{_datadir}/bash-completion/completions/dms # %{_datadir}/bash-completion/completions/dms
%{_datadir}/zsh/site-functions/_dms # %{_datadir}/zsh/site-functions/_dms
%{_datadir}/fish/vendor_completions.d/dms.fish # %{_datadir}/fish/vendor_completions.d/dms.fish
%changelog # %changelog
* CHANGELOG_DATE_PLACEHOLDER AvengeMedia <contact@avengemedia.com> - VERSION_PLACEHOLDER-1 # * CHANGELOG_DATE_PLACEHOLDER AvengeMedia <contact@avengemedia.com> - VERSION_PLACEHOLDER-1
- Stable release VERSION_PLACEHOLDER # - Stable release VERSION_PLACEHOLDER
- Built from GitHub release # - Built from GitHub release
SPECEOF # SPECEOF
sed -i "s/VERSION_PLACEHOLDER/${VERSION}/g" ~/rpmbuild/SPECS/dms.spec # sed -i "s/VERSION_PLACEHOLDER/${VERSION}/g" ~/rpmbuild/SPECS/dms.spec
sed -i "s/CHANGELOG_DATE_PLACEHOLDER/${CHANGELOG_DATE}/g" ~/rpmbuild/SPECS/dms.spec # sed -i "s/CHANGELOG_DATE_PLACEHOLDER/${CHANGELOG_DATE}/g" ~/rpmbuild/SPECS/dms.spec
- name: Build SRPM # - name: Build SRPM
id: build # id: build
run: | # run: |
cd ~/rpmbuild/SPECS # cd ~/rpmbuild/SPECS
rpmbuild -bs dms.spec # rpmbuild -bs dms.spec
SRPM=$(ls ~/rpmbuild/SRPMS/*.src.rpm | tail -n 1) # SRPM=$(ls ~/rpmbuild/SRPMS/*.src.rpm | tail -n 1)
SRPM_NAME=$(basename "$SRPM") # SRPM_NAME=$(basename "$SRPM")
echo "srpm_path=$SRPM" >> $GITHUB_OUTPUT # echo "srpm_path=$SRPM" >> $GITHUB_OUTPUT
echo "srpm_name=$SRPM_NAME" >> $GITHUB_OUTPUT # echo "srpm_name=$SRPM_NAME" >> $GITHUB_OUTPUT
echo "SRPM built: $SRPM_NAME" # echo "SRPM built: $SRPM_NAME"
- name: Upload SRPM artifact # - name: Upload SRPM artifact
uses: actions/upload-artifact@v4 # uses: actions/upload-artifact@v4
with: # with:
name: dms-stable-srpm-${{ steps.version.outputs.version }} # name: dms-stable-srpm-${{ steps.version.outputs.version }}
path: ${{ steps.build.outputs.srpm_path }} # path: ${{ steps.build.outputs.srpm_path }}
retention-days: 90 # retention-days: 90
- name: Install Copr CLI # - name: Install Copr CLI
run: | # run: |
sudo apt-get install -y python3-pip # sudo apt-get install -y python3-pip
pip3 install copr-cli # pip3 install copr-cli
mkdir -p ~/.config # mkdir -p ~/.config
cat > ~/.config/copr << EOF # cat > ~/.config/copr << EOF
[copr-cli] # [copr-cli]
login = ${{ secrets.COPR_LOGIN }} # login = ${{ secrets.COPR_LOGIN }}
username = avengemedia # username = avengemedia
token = ${{ secrets.COPR_TOKEN }} # token = ${{ secrets.COPR_TOKEN }}
copr_url = https://copr.fedorainfracloud.org # copr_url = https://copr.fedorainfracloud.org
EOF # EOF
chmod 600 ~/.config/copr # chmod 600 ~/.config/copr
- name: Upload to Copr # - name: Upload to Copr
run: | # run: |
SRPM="${{ steps.build.outputs.srpm_path }}" # SRPM="${{ steps.build.outputs.srpm_path }}"
VERSION="${{ steps.version.outputs.version }}" # VERSION="${{ steps.version.outputs.version }}"
echo "Uploading SRPM to avengemedia/dms..." # echo "Uploading SRPM to avengemedia/dms..."
BUILD_OUTPUT=$(copr-cli build avengemedia/dms "$SRPM" --nowait 2>&1) # BUILD_OUTPUT=$(copr-cli build avengemedia/dms "$SRPM" --nowait 2>&1)
echo "$BUILD_OUTPUT" # echo "$BUILD_OUTPUT"
BUILD_ID=$(echo "$BUILD_OUTPUT" | grep -oP 'Build was added to.*\K[0-9]+' || echo "unknown") # BUILD_ID=$(echo "$BUILD_OUTPUT" | grep -oP 'Build was added to.*\K[0-9]+' || echo "unknown")
if [ "$BUILD_ID" != "unknown" ]; then # if [ "$BUILD_ID" != "unknown" ]; then
echo "Build submitted: https://copr.fedorainfracloud.org/coprs/avengemedia/dms/build/$BUILD_ID/" # echo "Build submitted: https://copr.fedorainfracloud.org/coprs/avengemedia/dms/build/$BUILD_ID/"
fi # fi
+32 -1
View File
@@ -215,13 +215,44 @@ jobs:
# Update openSUSE dms spec (stable only) # Update openSUSE dms spec (stable only)
sed -i "s/^Version:.*/Version: $VERSION_NO_V/" distro/opensuse/dms.spec sed -i "s/^Version:.*/Version: $VERSION_NO_V/" distro/opensuse/dms.spec
# Update Debian _service files # Update openSUSE spec changelog
DATE_STR=$(date "+%a %b %d %Y")
CHANGELOG_ENTRY="* $DATE_STR AvengeMedia <maintainer@avengemedia.com> - ${VERSION_NO_V}-1\\n- Update to stable $VERSION release\\n- Bug fixes and improvements"
sed -i "/%changelog/a\\$CHANGELOG_ENTRY\\n" distro/opensuse/dms.spec
# Update Debian _service files (both tar_scm and download_url formats)
for service in distro/debian/*/_service; do for service in distro/debian/*/_service; do
if [[ -f "$service" ]]; then if [[ -f "$service" ]]; then
# Update tar_scm revision parameter (for dms-git)
sed -i "s|<param name=\"revision\">v[0-9.]*</param>|<param name=\"revision\">$VERSION</param>|" "$service" sed -i "s|<param name=\"revision\">v[0-9.]*</param>|<param name=\"revision\">$VERSION</param>|" "$service"
# Update download_url paths (for dms stable)
sed -i "s|/v[0-9.]\+/|/$VERSION/|g" "$service"
sed -i "s|/tags/v[0-9.]\+\.tar\.gz|/tags/$VERSION.tar.gz|g" "$service"
fi fi
done done
# Update Debian changelog for dms stable
if [[ -f "distro/debian/dms/debian/changelog" ]]; then
CHANGELOG_DATE=$(date -R)
TEMP_CHANGELOG=$(mktemp)
cat > "$TEMP_CHANGELOG" << EOF
dms ($VERSION_NO_V) stable; urgency=medium
* Update to $VERSION stable release
* Bug fixes and improvements
-- Avenge Media <AvengeMedia.US@gmail.com> $CHANGELOG_DATE
EOF
cat "distro/debian/dms/debian/changelog" >> "$TEMP_CHANGELOG"
mv "$TEMP_CHANGELOG" "distro/debian/dms/debian/changelog"
echo "✓ Updated Debian changelog to $VERSION_NO_V"
fi
- name: Install Go - name: Install Go
uses: actions/setup-go@v5 uses: actions/setup-go@v5
with: with:
+10 -4
View File
@@ -130,6 +130,12 @@ jobs:
run: | run: |
PACKAGES="${{ steps.packages.outputs.packages }}" PACKAGES="${{ steps.packages.outputs.packages }}"
# Export to ensure it's available to subprocesses
if [ -n "$REBUILD_RELEASE" ]; then
export REBUILD_RELEASE
echo "✓ Using rebuild release number: ppa$REBUILD_RELEASE"
fi
if [[ "$PACKAGES" == "all" ]]; then if [[ "$PACKAGES" == "all" ]]; then
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Uploading dms to PPA..." echo "Uploading dms to PPA..."
@@ -137,25 +143,25 @@ jobs:
echo "🔄 Using rebuild release number: ppa$REBUILD_RELEASE" echo "🔄 Using rebuild release number: ppa$REBUILD_RELEASE"
fi fi
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
bash distro/scripts/ppa-upload.sh "distro/ubuntu/dms" dms questing REBUILD_RELEASE="$REBUILD_RELEASE" bash distro/scripts/ppa-upload.sh "distro/ubuntu/dms" dms questing
echo "" echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Uploading dms-git to PPA..." echo "Uploading dms-git to PPA..."
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
bash distro/scripts/ppa-upload.sh "distro/ubuntu/dms-git" dms-git questing REBUILD_RELEASE="$REBUILD_RELEASE" bash distro/scripts/ppa-upload.sh "distro/ubuntu/dms-git" dms-git questing
echo "" echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Uploading dms-greeter to PPA..." echo "Uploading dms-greeter to PPA..."
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
bash distro/scripts/ppa-upload.sh "distro/ubuntu/dms-greeter" danklinux questing REBUILD_RELEASE="$REBUILD_RELEASE" bash distro/scripts/ppa-upload.sh "distro/ubuntu/dms-greeter" danklinux questing
else else
PPA_NAME="$PACKAGES" PPA_NAME="$PACKAGES"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Uploading $PACKAGES to PPA..." echo "Uploading $PACKAGES to PPA..."
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
bash distro/scripts/ppa-upload.sh "distro/ubuntu/$PACKAGES" "$PPA_NAME" questing REBUILD_RELEASE="$REBUILD_RELEASE" bash distro/scripts/ppa-upload.sh "distro/ubuntu/$PACKAGES" "$PPA_NAME" questing
fi fi
- name: Summary - name: Summary
+2 -2
View File
@@ -36,7 +36,7 @@ jobs:
run: | run: |
set -euo pipefail set -euo pipefail
echo "Attempting nix build to get new vendorHash..." echo "Attempting nix build to get new vendorHash..."
if output=$(nix build .#dmsCli 2>&1); then if output=$(nix build .#dms-shell 2>&1); then
echo "Build succeeded, no hash update needed" echo "Build succeeded, no hash update needed"
exit 0 exit 0
fi fi
@@ -46,7 +46,7 @@ jobs:
[ "$current_hash" = "$new_hash" ] && { echo "vendorHash already up to date"; exit 0; } [ "$current_hash" = "$new_hash" ] && { echo "vendorHash already up to date"; exit 0; }
sed -i "s|vendorHash = \"$current_hash\"|vendorHash = \"$new_hash\"|" flake.nix sed -i "s|vendorHash = \"$current_hash\"|vendorHash = \"$new_hash\"|" flake.nix
echo "Verifying build with new vendorHash..." echo "Verifying build with new vendorHash..."
nix build .#dmsCli nix build .#dms-shell
echo "vendorHash updated successfully!" echo "vendorHash updated successfully!"
- name: Commit and push vendorHash update - name: Commit and push vendorHash update
+12
View File
@@ -0,0 +1,12 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v6.0.0
hooks:
- id: trailing-whitespace
- id: check-yaml
- id: end-of-file-fixer
- repo: https://github.com/shellcheck-py/shellcheck-py
rev: v0.10.0.1
hooks:
- id: shellcheck
args: [-e, SC2164, -e, SC2001, -e, SC2012, -e, SC2317]
+6
View File
@@ -0,0 +1,6 @@
# 1.2.0
- Added clipboard and clipboard history integration
- Added swipe-to-dismiss for notification popups and for entries in the notification center
- Added paste from clipboard history view - requires wtype
- Optimize surface damage of OSD & Toast
+17 -2
View File
@@ -6,10 +6,10 @@ To contribute fork this repository, make your changes, and open a pull request.
## Setup ## Setup
Enable pre-commit hooks to catch CI failures before pushing: Install [prek](https://prek.j178.dev/) then activate pre-commit hooks:
```bash ```bash
git config core.hooksPath .githooks prek install
``` ```
### Nix Development Shell ### Nix Development Shell
@@ -21,6 +21,7 @@ nix develop
``` ```
This will provide: This will provide:
- Go 1.24 toolchain (go, gopls, delve, go-tools) and GNU Make - Go 1.24 toolchain (go, gopls, delve, go-tools) and GNU Make
- Quickshell and required QML packages - Quickshell and required QML packages
- Properly configured QML2_IMPORT_PATH - Properly configured QML2_IMPORT_PATH
@@ -54,6 +55,20 @@ touch .qmlls.ini
5. Make your changes, test, and open a pull request. 5. Make your changes, test, and open a pull request.
### I18n/Localization
When adding user-facing strings, ensure they are wrapped in `I18n.tr()` with context. For example:
```qml
import qs.Common
Text {
text: I18n.tr("Hello World", "<This is context for the translators, example> Hello world greeting that appears on the lock screen")
}
```
Preferably, try to keep new terms to a minimum and re-use existing terms where possible. See `quickshell/translations/en.json` for the list of existing terms. (This isn't always possible obviously, but instead of using `Auto-connect` you would use `Autoconnect` since it's already translated)
### GO (`core` directory) ### GO (`core` directory)
1. Install the [Go Extension](https://code.visualstudio.com/docs/languages/go) 1. Install the [Go Extension](https://code.visualstudio.com/docs/languages/go)
+6 -2
View File
@@ -102,7 +102,11 @@ linters:
- linters: - linters:
- ineffassign - ineffassign
path: internal/proto/ path: internal/proto/
# binary.Write to bytes.Buffer can't fail # binary.Write/Read to bytes.Buffer can't fail
- linters: - linters:
- errcheck - errcheck
text: "Error return value of `binary\\.Write` is not checked" text: "Error return value of `binary\\.(Write|Read)` is not checked"
# bytes.Reader.Read can't fail (reads from memory)
- linters:
- errcheck
text: "Error return value of `buf\\.Read` is not checked"
+12
View File
@@ -56,3 +56,15 @@ packages:
outpkg: mocks_version outpkg: mocks_version
interfaces: interfaces:
VersionFetcher: VersionFetcher:
github.com/AvengeMedia/DankMaterialShell/core/internal/server/wlcontext:
config:
dir: "internal/mocks/wlcontext"
outpkg: mocks_wlcontext
interfaces:
WaylandContext:
github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/wayland/client:
config:
dir: "internal/mocks/wlclient"
outpkg: mocks_wlclient
interfaces:
WaylandDisplay:
+7
View File
@@ -0,0 +1,7 @@
repos:
- repo: https://github.com/golangci/golangci-lint
rev: v2.6.2
hooks:
- id: golangci-lint-full
- id: golangci-lint-fmt
- id: golangci-lint-config-verify
+628
View File
@@ -0,0 +1,628 @@
package main
import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"os/exec"
	"os/signal"
	"strconv"
	"syscall"
	"time"

	"github.com/AvengeMedia/DankMaterialShell/core/internal/clipboard"
	"github.com/AvengeMedia/DankMaterialShell/core/internal/log"
	"github.com/AvengeMedia/DankMaterialShell/core/internal/server/models"
	"github.com/spf13/cobra"
)
// clipboardCmd is the root "dms clipboard" (alias "cl") command group.
// Subcommands are attached in init().
var clipboardCmd = &cobra.Command{
	Use:     "clipboard",
	Aliases: []string{"cl"},
	Short:   "Manage clipboard",
	Long:    "Interact with the clipboard manager",
}

// clipCopyCmd copies its argument (or stdin) to the clipboard; it does not
// need the DMS server.
var clipCopyCmd = &cobra.Command{
	Use:   "copy [text]",
	Short: "Copy text to clipboard",
	Long:  "Copy text to clipboard. If no text provided, reads from stdin. Works without server.",
	Run:   runClipCopy,
}

// Flags for "copy", plus the shared --json output toggle. clipJSONOutput is
// registered on several subcommands (watch/history/get/search) and is shared
// between them.
var (
	clipCopyForeground bool
	clipCopyPasteOnce  bool
	clipCopyType       string
	clipJSONOutput     bool
)

// clipPasteCmd prints the current clipboard contents to stdout.
var clipPasteCmd = &cobra.Command{
	Use:   "paste",
	Short: "Paste text from clipboard",
	Long:  "Paste text from clipboard to stdout. Works without server.",
	Run:   runClipPaste,
}

// clipWatchCmd blocks and reacts to clipboard changes (wl-paste --watch
// equivalent). See runClipWatch for the mode-selection rules.
var clipWatchCmd = &cobra.Command{
	Use:   "watch [command]",
	Short: "Watch clipboard for changes",
	Long: `Watch clipboard for changes and optionally execute a command.
Works like wl-paste --watch. Does not require server.
If a command is provided, it will be executed each time the clipboard changes,
with the clipboard content piped to its stdin.
Examples:
dms cl watch # Print clipboard changes to stdout
dms cl watch cat # Same as above
dms cl watch notify-send # Send notification on clipboard change`,
	Run: runClipWatch,
}

// clipHistoryCmd lists stored history entries (server required).
var clipHistoryCmd = &cobra.Command{
	Use:   "history",
	Short: "Show clipboard history",
	Long:  "Show clipboard history with previews (requires server)",
	Run:   runClipHistory,
}

// clipGetCmd fetches one history entry by numeric ID (server required).
var clipGetCmd = &cobra.Command{
	Use:   "get <id>",
	Short: "Get clipboard entry by ID",
	Long:  "Get full clipboard entry data by ID (requires server). Use --copy to copy it to clipboard.",
	Args:  cobra.ExactArgs(1),
	Run:   runClipGet,
}

// clipGetCopy switches "get" from printing the entry to re-copying it.
var clipGetCopy bool

// clipDeleteCmd removes one history entry by ID (server required).
var clipDeleteCmd = &cobra.Command{
	Use:   "delete <id>",
	Short: "Delete clipboard entry",
	Long:  "Delete a clipboard history entry by ID (requires server)",
	Args:  cobra.ExactArgs(1),
	Run:   runClipDelete,
}

// clipClearCmd wipes the entire history (server required).
var clipClearCmd = &cobra.Command{
	Use:   "clear",
	Short: "Clear clipboard history",
	Long:  "Clear all clipboard history (requires server)",
	Run:   runClipClear,
}

// clipWatchStore makes "watch" persist each change to history locally.
var clipWatchStore bool

// clipSearchCmd queries history with optional text/MIME/type filters.
var clipSearchCmd = &cobra.Command{
	Use:   "search [query]",
	Short: "Search clipboard history",
	Long:  "Search clipboard history with filters (requires server)",
	Run:   runClipSearch,
}

// Flags for "search". clipSearchImages and clipSearchText are mutually
// exclusive in effect; images wins when both are set (see runClipSearch).
var (
	clipSearchLimit    int
	clipSearchOffset   int
	clipSearchMimeType string
	clipSearchImages   bool
	clipSearchText     bool
)

// clipConfigCmd groups the config get/set subcommands.
var clipConfigCmd = &cobra.Command{
	Use:   "config",
	Short: "Manage clipboard config",
	Long:  "Get or set clipboard configuration (requires server)",
}

// clipConfigGetCmd prints the server-side clipboard config as JSON.
var clipConfigGetCmd = &cobra.Command{
	Use:   "get",
	Short: "Get clipboard config",
	Run:   runClipConfigGet,
}

// clipConfigSetCmd updates server-side clipboard config options.
var clipConfigSetCmd = &cobra.Command{
	Use:   "set",
	Short: "Set clipboard config",
	Long: `Set clipboard configuration options.
Examples:
dms cl config set --max-history 200
dms cl config set --auto-clear-days 7
dms cl config set --clear-at-startup`,
	Run: runClipConfigSet,
}

// Flags for "config set". Each enable/disable pair maps onto one boolean
// server option; runClipConfigSet only sends options whose flag was used.
var (
	clipConfigMaxHistory     int
	clipConfigAutoClearDays  int
	clipConfigClearAtStartup bool
	clipConfigNoClearStartup bool
	clipConfigDisabled       bool
	clipConfigEnabled        bool
	clipConfigDisableHistory bool
	clipConfigEnableHistory  bool
	clipConfigDisablePersist bool
	clipConfigEnablePersist  bool
)
// init wires flags onto the clipboard subcommands and assembles the
// command tree under clipboardCmd.
func init() {
	clipCopyCmd.Flags().BoolVarP(&clipCopyForeground, "foreground", "f", false, "Stay in foreground instead of forking")
	clipCopyCmd.Flags().BoolVarP(&clipCopyPasteOnce, "paste-once", "o", false, "Exit after first paste")
	clipCopyCmd.Flags().StringVarP(&clipCopyType, "type", "t", "text/plain;charset=utf-8", "MIME type")
	// --json is bound to the same shared variable on several commands;
	// only one command runs per process, so sharing is safe.
	clipWatchCmd.Flags().BoolVar(&clipJSONOutput, "json", false, "Output as JSON")
	clipHistoryCmd.Flags().BoolVar(&clipJSONOutput, "json", false, "Output as JSON")
	clipGetCmd.Flags().BoolVar(&clipJSONOutput, "json", false, "Output as JSON")
	clipGetCmd.Flags().BoolVarP(&clipGetCopy, "copy", "c", false, "Copy entry to clipboard")
	clipSearchCmd.Flags().IntVarP(&clipSearchLimit, "limit", "l", 50, "Max results")
	clipSearchCmd.Flags().IntVarP(&clipSearchOffset, "offset", "o", 0, "Result offset")
	clipSearchCmd.Flags().StringVarP(&clipSearchMimeType, "mime", "m", "", "Filter by MIME type")
	clipSearchCmd.Flags().BoolVar(&clipSearchImages, "images", false, "Only images")
	clipSearchCmd.Flags().BoolVar(&clipSearchText, "text", false, "Only text")
	clipSearchCmd.Flags().BoolVar(&clipJSONOutput, "json", false, "Output as JSON")
	// -1 sentinel on auto-clear-days lets "0" be an explicit "disable".
	clipConfigSetCmd.Flags().IntVar(&clipConfigMaxHistory, "max-history", 0, "Max history entries")
	clipConfigSetCmd.Flags().IntVar(&clipConfigAutoClearDays, "auto-clear-days", -1, "Auto-clear entries older than N days (0 to disable)")
	clipConfigSetCmd.Flags().BoolVar(&clipConfigClearAtStartup, "clear-at-startup", false, "Clear history on startup")
	clipConfigSetCmd.Flags().BoolVar(&clipConfigNoClearStartup, "no-clear-at-startup", false, "Don't clear history on startup")
	clipConfigSetCmd.Flags().BoolVar(&clipConfigDisabled, "disable", false, "Disable clipboard manager entirely")
	clipConfigSetCmd.Flags().BoolVar(&clipConfigEnabled, "enable", false, "Enable clipboard manager")
	clipConfigSetCmd.Flags().BoolVar(&clipConfigDisableHistory, "disable-history", false, "Disable clipboard history persistence")
	clipConfigSetCmd.Flags().BoolVar(&clipConfigEnableHistory, "enable-history", false, "Enable clipboard history persistence")
	clipConfigSetCmd.Flags().BoolVar(&clipConfigDisablePersist, "disable-persist", false, "Disable clipboard ownership persistence")
	clipConfigSetCmd.Flags().BoolVar(&clipConfigEnablePersist, "enable-persist", false, "Enable clipboard ownership persistence")
	clipWatchCmd.Flags().BoolVarP(&clipWatchStore, "store", "s", false, "Store clipboard changes to history (no server required)")
	clipConfigCmd.AddCommand(clipConfigGetCmd, clipConfigSetCmd)
	clipboardCmd.AddCommand(clipCopyCmd, clipPasteCmd, clipWatchCmd, clipHistoryCmd, clipGetCmd, clipDeleteCmd, clipClearCmd, clipSearchCmd, clipConfigCmd)
}
// runClipCopy places text on the clipboard: the first CLI argument when
// present, otherwise everything read from stdin. Copy semantics
// (foreground, paste-once, MIME type) come from the command flags.
func runClipCopy(cmd *cobra.Command, args []string) {
	var payload []byte
	if len(args) == 0 {
		in, readErr := io.ReadAll(os.Stdin)
		if readErr != nil {
			log.Fatalf("read stdin: %v", readErr)
		}
		payload = in
	} else {
		payload = []byte(args[0])
	}
	if copyErr := clipboard.CopyOpts(payload, clipCopyType, clipCopyForeground, clipCopyPasteOnce); copyErr != nil {
		log.Fatalf("copy: %v", copyErr)
	}
}
// runClipPaste writes the current clipboard contents verbatim to stdout.
// Works without the DMS server.
func runClipPaste(cmd *cobra.Command, args []string) {
	content, _, pasteErr := clipboard.Paste()
	if pasteErr != nil {
		log.Fatalf("paste: %v", pasteErr)
	}
	_, _ = os.Stdout.Write(content)
}
// runClipWatch blocks and invokes a handler on every clipboard change
// until SIGINT/SIGTERM cancels the context. Exactly one output mode is
// chosen, in priority order: command args > --store > --json > raw stdout
// (so extra flags are silently ignored when a command is given).
func runClipWatch(cmd *cobra.Command, args []string) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		<-sigCh
		cancel()
	}()
	switch {
	case len(args) > 0:
		// Pipe each clipboard change into the given command's stdin.
		if err := clipboard.Watch(ctx, func(data []byte, mimeType string) {
			runCommand(args, data)
		}); err != nil && err != context.Canceled {
			log.Fatalf("Watch error: %v", err)
		}
	case clipWatchStore:
		// Persist each change to the local history store (no server needed).
		// Store failures are logged but do not stop the watch loop.
		if err := clipboard.Watch(ctx, func(data []byte, mimeType string) {
			if err := clipboard.Store(data, mimeType); err != nil {
				log.Errorf("store: %v", err)
			}
		}); err != nil && err != context.Canceled {
			log.Fatalf("Watch error: %v", err)
		}
	case clipJSONOutput:
		// Emit one JSON object per change with content, MIME type,
		// wall-clock timestamp, and byte size.
		if err := clipboard.Watch(ctx, func(data []byte, mimeType string) {
			out := map[string]any{
				"data":      string(data),
				"mimeType":  mimeType,
				"timestamp": time.Now().Format(time.RFC3339),
				"size":      len(data),
			}
			j, _ := json.Marshal(out)
			fmt.Println(string(j))
		}); err != nil && err != context.Canceled {
			log.Fatalf("Watch error: %v", err)
		}
	default:
		// Raw clipboard bytes followed by a newline separator.
		if err := clipboard.Watch(ctx, func(data []byte, mimeType string) {
			os.Stdout.Write(data)
			os.Stdout.WriteString("\n")
		}); err != nil && err != context.Canceled {
			log.Fatalf("Watch error: %v", err)
		}
	}
}
// runCommand executes args[0] with the remaining args, feeding the stdin
// bytes (if any) to the child's standard input; the child's stdout/stderr
// go to ours. Errors are deliberately ignored: watch callbacks are
// best-effort and must not abort the watch loop.
func runCommand(args []string, stdin []byte) {
	cmd := exec.Command(args[0], args[1:]...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if len(stdin) > 0 {
		// bytes.Reader lets os/exec wire up the stdin pipe itself,
		// replacing the previous manual os.Pipe + goroutine plumbing
		// (which also silently ran without stdin if os.Pipe failed).
		cmd.Stdin = bytes.NewReader(stdin)
	}
	_ = cmd.Run()
}
// runClipHistory fetches the full clipboard history from the server and
// prints it, either as indented JSON (--json) or as a human-readable list
// of "ID | type | timestamp" headers with one-line previews.
func runClipHistory(cmd *cobra.Command, args []string) {
	req := models.Request{
		ID:     1,
		Method: "clipboard.getHistory",
	}
	resp, err := sendServerRequest(req)
	if err != nil {
		log.Fatalf("Failed to get clipboard history: %v", err)
	}
	if resp.Error != "" {
		log.Fatalf("Error: %s", resp.Error)
	}
	// A nil result means an empty history, not an error.
	if resp.Result == nil {
		if clipJSONOutput {
			fmt.Println("[]")
		} else {
			fmt.Println("No clipboard history")
		}
		return
	}
	historyList, ok := (*resp.Result).([]any)
	if !ok {
		log.Fatal("Invalid response format")
	}
	if clipJSONOutput {
		out, _ := json.MarshalIndent(historyList, "", " ")
		fmt.Println(string(out))
		return
	}
	if len(historyList) == 0 {
		fmt.Println("No clipboard history")
		return
	}
	fmt.Println("Clipboard History:")
	fmt.Println()
	for _, item := range historyList {
		entry, ok := item.(map[string]any)
		if !ok {
			continue
		}
		// NOTE(review): these direct assertions panic if the server ever
		// omits or retypes id/preview/timestamp/isImage — confirm the
		// server contract guarantees all four fields.
		id := uint64(entry["id"].(float64))
		preview := entry["preview"].(string)
		timestamp := entry["timestamp"].(string)
		isImage := entry["isImage"].(bool)
		typeStr := "text"
		if isImage {
			typeStr = "image"
		}
		fmt.Printf("ID: %d | %s | %s\n", id, typeStr, timestamp)
		fmt.Printf(" %s\n", preview)
		fmt.Println()
	}
}
// runClipGet fetches one history entry by numeric ID. With --copy it asks
// the server to re-copy the entry to the clipboard instead of printing it;
// otherwise it prints the entry's data (or, lacking a "data" string, the
// whole entry as JSON). --json always prints the full entry as JSON.
func runClipGet(cmd *cobra.Command, args []string) {
	id, err := strconv.ParseUint(args[0], 10, 64)
	if err != nil {
		log.Fatalf("Invalid ID: %v", err)
	}
	if clipGetCopy {
		// Copy path: server-side copy, nothing is printed but confirmation.
		req := models.Request{
			ID:     1,
			Method: "clipboard.copyEntry",
			Params: map[string]any{
				"id": id,
			},
		}
		resp, err := sendServerRequest(req)
		if err != nil {
			log.Fatalf("Failed to copy clipboard entry: %v", err)
		}
		if resp.Error != "" {
			log.Fatalf("Error: %s", resp.Error)
		}
		fmt.Printf("Copied entry %d to clipboard\n", id)
		return
	}
	req := models.Request{
		ID:     1,
		Method: "clipboard.getEntry",
		Params: map[string]any{
			"id": id,
		},
	}
	resp, err := sendServerRequest(req)
	if err != nil {
		log.Fatalf("Failed to get clipboard entry: %v", err)
	}
	if resp.Error != "" {
		log.Fatalf("Error: %s", resp.Error)
	}
	if resp.Result == nil {
		log.Fatal("Entry not found")
	}
	entry, ok := (*resp.Result).(map[string]any)
	if !ok {
		log.Fatal("Invalid response format")
	}
	switch {
	case clipJSONOutput:
		output, _ := json.MarshalIndent(entry, "", " ")
		fmt.Println(string(output))
	default:
		// Plain data when available; fall back to the raw entry as JSON
		// (e.g. for binary/image entries without a "data" string).
		if data, ok := entry["data"].(string); ok {
			fmt.Print(data)
		} else {
			output, _ := json.MarshalIndent(entry, "", " ")
			fmt.Println(string(output))
		}
	}
}
// runClipDelete removes a single clipboard history entry, identified by
// the numeric ID given as the first argument (server required).
func runClipDelete(cmd *cobra.Command, args []string) {
	entryID, parseErr := strconv.ParseUint(args[0], 10, 64)
	if parseErr != nil {
		log.Fatalf("Invalid ID: %v", parseErr)
	}
	resp, reqErr := sendServerRequest(models.Request{
		ID:     1,
		Method: "clipboard.deleteEntry",
		Params: map[string]any{"id": entryID},
	})
	if reqErr != nil {
		log.Fatalf("Failed to delete clipboard entry: %v", reqErr)
	}
	if resp.Error != "" {
		log.Fatalf("Error: %s", resp.Error)
	}
	fmt.Printf("Deleted entry %d\n", entryID)
}
// runClipClear asks the server to wipe the entire clipboard history.
func runClipClear(cmd *cobra.Command, args []string) {
	resp, reqErr := sendServerRequest(models.Request{
		ID:     1,
		Method: "clipboard.clearHistory",
	})
	if reqErr != nil {
		log.Fatalf("Failed to clear clipboard history: %v", reqErr)
	}
	if resp.Error != "" {
		log.Fatalf("Error: %s", resp.Error)
	}
	fmt.Println("Clipboard history cleared")
}
// runClipSearch queries clipboard history with optional text query, MIME
// filter, and image/text type filter, then prints paginated results.
// --images takes precedence over --text when both are set.
func runClipSearch(cmd *cobra.Command, args []string) {
	params := map[string]any{
		"limit":  clipSearchLimit,
		"offset": clipSearchOffset,
	}
	if len(args) > 0 {
		params["query"] = args[0]
	}
	if clipSearchMimeType != "" {
		params["mimeType"] = clipSearchMimeType
	}
	if clipSearchImages {
		params["isImage"] = true
	} else if clipSearchText {
		params["isImage"] = false
	}
	req := models.Request{
		ID:     1,
		Method: "clipboard.search",
		Params: params,
	}
	resp, err := sendServerRequest(req)
	if err != nil {
		log.Fatalf("Failed to search clipboard: %v", err)
	}
	if resp.Error != "" {
		log.Fatalf("Error: %s", resp.Error)
	}
	if resp.Result == nil {
		log.Fatal("No results")
	}
	result, ok := (*resp.Result).(map[string]any)
	if !ok {
		log.Fatal("Invalid response format")
	}
	if clipJSONOutput {
		out, _ := json.MarshalIndent(result, "", " ")
		fmt.Println(string(out))
		return
	}
	// NOTE(review): total/hasMore are asserted directly and panic if the
	// server omits them — confirm they are always present in the response.
	entries, _ := result["entries"].([]any)
	total := int(result["total"].(float64))
	hasMore := result["hasMore"].(bool)
	if len(entries) == 0 {
		fmt.Println("No results found")
		return
	}
	fmt.Printf("Results: %d of %d\n\n", len(entries), total)
	for _, item := range entries {
		entry, ok := item.(map[string]any)
		if !ok {
			continue
		}
		id := uint64(entry["id"].(float64))
		preview := entry["preview"].(string)
		timestamp := entry["timestamp"].(string)
		isImage := entry["isImage"].(bool)
		typeStr := "text"
		if isImage {
			typeStr = "image"
		}
		fmt.Printf("ID: %d | %s | %s\n", id, typeStr, timestamp)
		fmt.Printf(" %s\n\n", preview)
	}
	// Hint the next page's offset for manual pagination.
	if hasMore {
		fmt.Printf("Use --offset %d to see more results\n", clipSearchOffset+clipSearchLimit)
	}
}
// runClipConfigGet fetches the clipboard configuration from the server
// and prints it as indented JSON.
func runClipConfigGet(cmd *cobra.Command, args []string) {
	resp, reqErr := sendServerRequest(models.Request{
		ID:     1,
		Method: "clipboard.getConfig",
	})
	if reqErr != nil {
		log.Fatalf("Failed to get config: %v", reqErr)
	}
	if resp.Error != "" {
		log.Fatalf("Error: %s", resp.Error)
	}
	if resp.Result == nil {
		log.Fatal("No config returned")
	}
	cfg, isMap := (*resp.Result).(map[string]any)
	if !isMap {
		log.Fatal("Invalid response format")
	}
	pretty, _ := json.MarshalIndent(cfg, "", " ")
	fmt.Println(string(pretty))
}
// runClipConfigSet translates the "config set" flags into a partial config
// update and sends it to the server. Only options whose flag was actually
// supplied are included, so unset flags leave server config untouched.
// Numeric flags use Flags().Changed() so explicit zeros are honored;
// boolean pairs (enable/disable) map onto one server field each, with the
// "disable"/"no-" variant applied last when both are passed.
func runClipConfigSet(cmd *cobra.Command, args []string) {
	params := map[string]any{}
	if cmd.Flags().Changed("max-history") {
		params["maxHistory"] = clipConfigMaxHistory
	}
	if cmd.Flags().Changed("auto-clear-days") {
		params["autoClearDays"] = clipConfigAutoClearDays
	}
	if clipConfigClearAtStartup {
		params["clearAtStartup"] = true
	}
	if clipConfigNoClearStartup {
		params["clearAtStartup"] = false
	}
	if clipConfigDisabled {
		params["disabled"] = true
	}
	if clipConfigEnabled {
		params["disabled"] = false
	}
	if clipConfigDisableHistory {
		params["disableHistory"] = true
	}
	if clipConfigEnableHistory {
		params["disableHistory"] = false
	}
	if clipConfigDisablePersist {
		params["disablePersist"] = true
	}
	if clipConfigEnablePersist {
		params["disablePersist"] = false
	}
	if len(params) == 0 {
		fmt.Println("No config options specified")
		return
	}
	req := models.Request{
		ID:     1,
		Method: "clipboard.setConfig",
		Params: params,
	}
	resp, err := sendServerRequest(req)
	if err != nil {
		log.Fatalf("Failed to set config: %v", err)
	}
	if resp.Error != "" {
		log.Fatalf("Error: %s", resp.Error)
	}
	fmt.Println("Config updated")
}
+3 -9
View File
@@ -3,8 +3,8 @@ package main
import ( import (
"fmt" "fmt"
"os" "os"
"os/exec"
"github.com/AvengeMedia/DankMaterialShell/core/internal/clipboard"
"github.com/AvengeMedia/DankMaterialShell/core/internal/colorpicker" "github.com/AvengeMedia/DankMaterialShell/core/internal/colorpicker"
"github.com/spf13/cobra" "github.com/spf13/cobra"
) )
@@ -121,13 +121,7 @@ func runColorPick(cmd *cobra.Command, args []string) {
} }
func copyToClipboard(text string) { func copyToClipboard(text string) {
var cmd *exec.Cmd if err := clipboard.CopyText(text); err != nil {
if _, err := exec.LookPath("wl-copy"); err == nil { fmt.Fprintln(os.Stderr, "clipboard copy failed:", err)
cmd = exec.Command("wl-copy", text)
} else {
fmt.Fprintln(os.Stderr, "wl-copy not found, cannot copy to clipboard")
return
} }
_ = cmd.Run()
} }
+1
View File
@@ -513,5 +513,6 @@ func getCommonCommands() []*cobra.Command {
screenshotCmd, screenshotCmd,
notifyActionCmd, notifyActionCmd,
matugenCmd, matugenCmd,
clipboardCmd,
} }
} }
+26 -44
View File
@@ -2,15 +2,12 @@ package main
import ( import (
"context" "context"
"encoding/json"
"fmt" "fmt"
"net"
"os"
"time" "time"
"github.com/AvengeMedia/DankMaterialShell/core/internal/log" "github.com/AvengeMedia/DankMaterialShell/core/internal/log"
"github.com/AvengeMedia/DankMaterialShell/core/internal/matugen" "github.com/AvengeMedia/DankMaterialShell/core/internal/matugen"
"github.com/AvengeMedia/DankMaterialShell/core/internal/server" "github.com/AvengeMedia/DankMaterialShell/core/internal/server/models"
"github.com/spf13/cobra" "github.com/spf13/cobra"
) )
@@ -49,6 +46,7 @@ func init() {
cmd.Flags().String("stock-colors", "", "Stock theme colors JSON") cmd.Flags().String("stock-colors", "", "Stock theme colors JSON")
cmd.Flags().Bool("sync-mode-with-portal", false, "Sync color scheme with GNOME portal") cmd.Flags().Bool("sync-mode-with-portal", false, "Sync color scheme with GNOME portal")
cmd.Flags().Bool("terminals-always-dark", false, "Force terminal themes to dark variant") cmd.Flags().Bool("terminals-always-dark", false, "Force terminal themes to dark variant")
cmd.Flags().String("skip-templates", "", "Comma-separated list of templates to skip")
} }
matugenQueueCmd.Flags().Bool("wait", true, "Wait for completion") matugenQueueCmd.Flags().Bool("wait", true, "Wait for completion")
@@ -68,6 +66,7 @@ func buildMatugenOptions(cmd *cobra.Command) matugen.Options {
stockColors, _ := cmd.Flags().GetString("stock-colors") stockColors, _ := cmd.Flags().GetString("stock-colors")
syncModeWithPortal, _ := cmd.Flags().GetBool("sync-mode-with-portal") syncModeWithPortal, _ := cmd.Flags().GetBool("sync-mode-with-portal")
terminalsAlwaysDark, _ := cmd.Flags().GetBool("terminals-always-dark") terminalsAlwaysDark, _ := cmd.Flags().GetBool("terminals-always-dark")
skipTemplates, _ := cmd.Flags().GetString("skip-templates")
return matugen.Options{ return matugen.Options{
StateDir: stateDir, StateDir: stateDir,
@@ -82,6 +81,7 @@ func buildMatugenOptions(cmd *cobra.Command) matugen.Options {
StockColors: stockColors, StockColors: stockColors,
SyncModeWithPortal: syncModeWithPortal, SyncModeWithPortal: syncModeWithPortal,
TerminalsAlwaysDark: terminalsAlwaysDark, TerminalsAlwaysDark: terminalsAlwaysDark,
SkipTemplates: skipTemplates,
} }
} }
@@ -97,33 +97,10 @@ func runMatugenQueue(cmd *cobra.Command, args []string) {
wait, _ := cmd.Flags().GetBool("wait") wait, _ := cmd.Flags().GetBool("wait")
timeout, _ := cmd.Flags().GetDuration("timeout") timeout, _ := cmd.Flags().GetDuration("timeout")
socketPath := os.Getenv("DMS_SOCKET") request := models.Request{
if socketPath == "" { ID: 1,
var err error Method: "matugen.queue",
socketPath, err = server.FindSocket() Params: map[string]any{
if err != nil {
log.Info("No socket available, running synchronously")
if err := matugen.Run(opts); err != nil {
log.Fatalf("Theme generation failed: %v", err)
}
return
}
}
conn, err := net.Dial("unix", socketPath)
if err != nil {
log.Info("Socket connection failed, running synchronously")
if err := matugen.Run(opts); err != nil {
log.Fatalf("Theme generation failed: %v", err)
}
return
}
defer conn.Close()
request := map[string]any{
"id": 1,
"method": "matugen.queue",
"params": map[string]any{
"stateDir": opts.StateDir, "stateDir": opts.StateDir,
"shellDir": opts.ShellDir, "shellDir": opts.ShellDir,
"configDir": opts.ConfigDir, "configDir": opts.ConfigDir,
@@ -136,15 +113,19 @@ func runMatugenQueue(cmd *cobra.Command, args []string) {
"stockColors": opts.StockColors, "stockColors": opts.StockColors,
"syncModeWithPortal": opts.SyncModeWithPortal, "syncModeWithPortal": opts.SyncModeWithPortal,
"terminalsAlwaysDark": opts.TerminalsAlwaysDark, "terminalsAlwaysDark": opts.TerminalsAlwaysDark,
"skipTemplates": opts.SkipTemplates,
"wait": wait, "wait": wait,
}, },
} }
if err := json.NewEncoder(conn).Encode(request); err != nil {
log.Fatalf("Failed to send request: %v", err)
}
if !wait { if !wait {
if err := sendServerRequestFireAndForget(request); err != nil {
log.Info("Server unavailable, running synchronously")
if err := matugen.Run(opts); err != nil {
log.Fatalf("Theme generation failed: %v", err)
}
return
}
fmt.Println("Theme generation queued") fmt.Println("Theme generation queued")
return return
} }
@@ -154,17 +135,18 @@ func runMatugenQueue(cmd *cobra.Command, args []string) {
resultCh := make(chan error, 1) resultCh := make(chan error, 1)
go func() { go func() {
var response struct { resp, ok := tryServerRequest(request)
ID int `json:"id"` if !ok {
Result any `json:"result"` log.Info("Server unavailable, running synchronously")
Error string `json:"error"` if err := matugen.Run(opts); err != nil {
} resultCh <- err
if err := json.NewDecoder(conn).Decode(&response); err != nil { return
resultCh <- fmt.Errorf("failed to read response: %w", err) }
resultCh <- nil
return return
} }
if response.Error != "" { if resp.Error != "" {
resultCh <- fmt.Errorf("server error: %s", response.Error) resultCh <- fmt.Errorf("server error: %s", resp.Error)
return return
} }
resultCh <- nil resultCh <- nil
+3 -31
View File
@@ -1,17 +1,14 @@
package main package main
import ( import (
"encoding/json"
"fmt" "fmt"
"mime" "mime"
"net"
"net/url" "net/url"
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
"github.com/AvengeMedia/DankMaterialShell/core/internal/log" "github.com/AvengeMedia/DankMaterialShell/core/internal/log"
"github.com/AvengeMedia/DankMaterialShell/core/internal/server"
"github.com/AvengeMedia/DankMaterialShell/core/internal/server/models" "github.com/AvengeMedia/DankMaterialShell/core/internal/server/models"
"github.com/spf13/cobra" "github.com/spf13/cobra"
) )
@@ -93,32 +90,6 @@ func mimeTypeToCategories(mimeType string) []string {
} }
func runOpen(target string) { func runOpen(target string) {
socketPath, err := server.FindSocket()
if err != nil {
log.Warnf("DMS socket not found: %v", err)
fmt.Println("DMS is not running. Please start DMS first.")
os.Exit(1)
}
conn, err := net.Dial("unix", socketPath)
if err != nil {
log.Warnf("DMS socket connection failed: %v", err)
fmt.Println("DMS is not running. Please start DMS first.")
os.Exit(1)
}
defer conn.Close()
buf := make([]byte, 1)
for {
_, err := conn.Read(buf)
if err != nil {
return
}
if buf[0] == '\n' {
break
}
}
// Parse file:// URIs to extract the actual file path // Parse file:// URIs to extract the actual file path
actualTarget := target actualTarget := target
detectedMimeType := openMimeType detectedMimeType := openMimeType
@@ -219,8 +190,9 @@ func runOpen(target string) {
log.Infof("Sending request - Method: %s, Params: %+v", method, params) log.Infof("Sending request - Method: %s, Params: %+v", method, params)
if err := json.NewEncoder(conn).Encode(req); err != nil { if err := sendServerRequestFireAndForget(req); err != nil {
log.Fatalf("Failed to send request: %v", err) fmt.Println("DMS is not running. Please start DMS first.")
os.Exit(1)
} }
log.Infof("Request sent successfully") log.Infof("Request sent successfully")
+2 -4
View File
@@ -4,10 +4,10 @@ import (
"bytes" "bytes"
"fmt" "fmt"
"os" "os"
"os/exec"
"path/filepath" "path/filepath"
"strings" "strings"
"github.com/AvengeMedia/DankMaterialShell/core/internal/clipboard"
"github.com/AvengeMedia/DankMaterialShell/core/internal/screenshot" "github.com/AvengeMedia/DankMaterialShell/core/internal/screenshot"
"github.com/spf13/cobra" "github.com/spf13/cobra"
) )
@@ -257,9 +257,7 @@ func copyImageToClipboard(buf *screenshot.ShmBuffer, format screenshot.Format, q
} }
} }
cmd := exec.Command("wl-copy", "--type", mimeType) return clipboard.Copy(data.Bytes(), mimeType)
cmd.Stdin = &data
return cmd.Run()
} }
func writeImageToStdout(buf *screenshot.ShmBuffer, format screenshot.Format, quality int, pixelFormat uint32) error { func writeImageToStdout(buf *screenshot.ShmBuffer, format screenshot.Format, quality int, pixelFormat uint32) error {
+114
View File
@@ -0,0 +1,114 @@
package main
import (
"bufio"
"encoding/json"
"fmt"
"net"
"os"
"path/filepath"
"github.com/AvengeMedia/DankMaterialShell/core/internal/server"
"github.com/AvengeMedia/DankMaterialShell/core/internal/server/models"
)
// sendServerRequest performs one request/response round trip over the DMS
// unix socket. The server greets each connection with a capabilities line,
// which is discarded before the newline-delimited JSON request is written.
func sendServerRequest(req models.Request) (*models.Response[any], error) {
	socketPath := getServerSocketPath()
	conn, err := net.Dial("unix", socketPath)
	if err != nil {
		return nil, fmt.Errorf("failed to connect to server (is it running?): %w", err)
	}
	defer conn.Close()
	scanner := bufio.NewScanner(conn)
	// Clipboard entries (e.g. base64 image data) can exceed bufio.Scanner's
	// default 64KiB token limit; allow responses up to 16MiB.
	scanner.Buffer(make([]byte, 0, 64*1024), 16*1024*1024)
	scanner.Scan() // discard initial capabilities message
	reqData, err := json.Marshal(req)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request: %w", err)
	}
	if _, err := conn.Write(reqData); err != nil {
		return nil, fmt.Errorf("failed to write request: %w", err)
	}
	if _, err := conn.Write([]byte("\n")); err != nil {
		return nil, fmt.Errorf("failed to write newline: %w", err)
	}
	if !scanner.Scan() {
		// Surface the underlying read error (e.g. token too long) rather
		// than conflating it with a clean EOF.
		if scanErr := scanner.Err(); scanErr != nil {
			return nil, fmt.Errorf("failed to read response: %w", scanErr)
		}
		return nil, fmt.Errorf("failed to read response")
	}
	var resp models.Response[any]
	if err := json.Unmarshal(scanner.Bytes(), &resp); err != nil {
		return nil, fmt.Errorf("failed to unmarshal response: %w", err)
	}
	return &resp, nil
}
// sendServerRequestFireAndForget sends a request without waiting for a response.
// Useful for commands that trigger UI or async operations.
func sendServerRequestFireAndForget(req models.Request) error {
	conn, dialErr := net.Dial("unix", getServerSocketPath())
	if dialErr != nil {
		return fmt.Errorf("failed to connect to server (is it running?): %w", dialErr)
	}
	defer conn.Close()
	greeter := bufio.NewScanner(conn)
	greeter.Scan() // discard initial capabilities message
	payload, marshalErr := json.Marshal(req)
	if marshalErr != nil {
		return fmt.Errorf("failed to marshal request: %w", marshalErr)
	}
	if _, writeErr := conn.Write(payload); writeErr != nil {
		return fmt.Errorf("failed to write request: %w", writeErr)
	}
	if _, writeErr := conn.Write([]byte("\n")); writeErr != nil {
		return fmt.Errorf("failed to write newline: %w", writeErr)
	}
	return nil
}
// tryServerRequest attempts to send a request but returns false if server unavailable.
// Does not log errors - caller can decide what to do on failure.
func tryServerRequest(req models.Request) (*models.Response[any], bool) {
	if resp, err := sendServerRequest(req); err == nil {
		return resp, true
	}
	return nil, false
}
// getServerSocketPath locates the DMS server socket. It scans
// XDG_RUNTIME_DIR (or the system temp dir when unset) for either the
// canonical "danklinux.sock" or a "danklinux-*.sock" variant, and falls
// back to the server package's default path when neither is found.
func getServerSocketPath() string {
	dir := os.Getenv("XDG_RUNTIME_DIR")
	if dir == "" {
		dir = os.TempDir()
	}
	entries, readErr := os.ReadDir(dir)
	if readErr != nil {
		// Directory unreadable: assume the canonical name.
		return filepath.Join(dir, "danklinux.sock")
	}
	for _, e := range entries {
		candidate := e.Name()
		switch {
		case candidate == "danklinux.sock":
			return filepath.Join(dir, candidate)
		case len(candidate) > 10 && candidate[:10] == "danklinux-" && filepath.Ext(candidate) == ".sock":
			return filepath.Join(dir, candidate)
		}
	}
	return server.GetSocketPath()
}
+1
View File
@@ -155,6 +155,7 @@ func runShellInteractive(session bool) {
errChan <- fmt.Errorf("server panic: %v", r) errChan <- fmt.Errorf("server panic: %v", r)
} }
}() }()
server.CLIVersion = Version
if err := server.Start(false); err != nil { if err := server.Start(false); err != nil {
errChan <- fmt.Errorf("server error: %w", err) errChan <- fmt.Errorf("server error: %w", err)
} }
+3 -1
View File
@@ -15,7 +15,9 @@ require (
github.com/sblinch/kdl-go v0.0.0-20250930225324-bf4099d4614a github.com/sblinch/kdl-go v0.0.0-20250930225324-bf4099d4614a
github.com/spf13/cobra v1.10.1 github.com/spf13/cobra v1.10.1
github.com/stretchr/testify v1.11.1 github.com/stretchr/testify v1.11.1
go.etcd.io/bbolt v1.4.3
golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39 golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39
golang.org/x/image v0.34.0
) )
require ( require (
@@ -65,6 +67,6 @@ require (
github.com/spf13/pflag v1.0.10 // indirect github.com/spf13/pflag v1.0.10 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
golang.org/x/sys v0.38.0 golang.org/x/sys v0.38.0
golang.org/x/text v0.31.0 golang.org/x/text v0.32.0
gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect
) )
+8 -2
View File
@@ -131,20 +131,26 @@ github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
go.etcd.io/bbolt v1.4.3 h1:dEadXpI6G79deX5prL3QRNP6JB8UxVkqo4UPnHaNXJo=
go.etcd.io/bbolt v1.4.3/go.mod h1:tKQlpPaYCVFctUIgFKFnAlvbmB3tpy1vkTnDWohtc0E=
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39 h1:DHNhtq3sNNzrvduZZIiFyXWOL9IWaDPHqTnLJp+rCBY= golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39 h1:DHNhtq3sNNzrvduZZIiFyXWOL9IWaDPHqTnLJp+rCBY=
golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39/go.mod h1:46edojNIoXTNOhySWIWdix628clX9ODXwPsQuG6hsK0= golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39/go.mod h1:46edojNIoXTNOhySWIWdix628clX9ODXwPsQuG6hsK0=
golang.org/x/image v0.34.0 h1:33gCkyw9hmwbZJeZkct8XyR11yH889EQt/QH4VmXMn8=
golang.org/x/image v0.34.0/go.mod h1:2RNFBZRB+vnwwFil8GkMdRvrJOFd1AzdZI6vOY+eJVU=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+17 -17
View File
@@ -1,4 +1,4 @@
#!/bin/sh #!/usr/bin/env bash
set -e set -e
@@ -9,8 +9,8 @@ NC='\033[0m' # No Color
# Check for root privileges # Check for root privileges
if [ "$(id -u)" == "0" ]; then if [ "$(id -u)" == "0" ]; then
printf "%bError: This script must not be run as root%b\n" "$RED" "$NC" printf "%bError: This script must not be run as root%b\n" "$RED" "$NC"
exit 1 exit 1
fi fi
# Check if running on Linux # Check if running on Linux
@@ -22,17 +22,17 @@ fi
# Detect architecture # Detect architecture
ARCH=$(uname -m) ARCH=$(uname -m)
case "$ARCH" in case "$ARCH" in
x86_64) x86_64)
ARCH="amd64" ARCH="amd64"
;; ;;
aarch64) aarch64)
ARCH="arm64" ARCH="arm64"
;; ;;
*) *)
printf "%bError: Unsupported architecture: %s%b\n" "$RED" "$ARCH" "$NC" printf "%bError: Unsupported architecture: %s%b\n" "$RED" "$ARCH" "$NC"
printf "This installer only supports x86_64 (amd64) and aarch64 (arm64) architectures\n" printf "This installer only supports x86_64 (amd64) and aarch64 (arm64) architectures\n"
exit 1 exit 1
;; ;;
esac esac
# Get the latest release version # Get the latest release version
@@ -55,7 +55,7 @@ curl -L "https://github.com/AvengeMedia/DankMaterialShell/releases/download/$LAT
curl -L "https://github.com/AvengeMedia/DankMaterialShell/releases/download/$LATEST_VERSION/dankinstall-$ARCH.gz.sha256" -o "expected.sha256" curl -L "https://github.com/AvengeMedia/DankMaterialShell/releases/download/$LATEST_VERSION/dankinstall-$ARCH.gz.sha256" -o "expected.sha256"
# Get the expected checksum # Get the expected checksum
EXPECTED_CHECKSUM=$(cat expected.sha256 | awk '{print $1}') EXPECTED_CHECKSUM=$(awk '{print $1}' expected.sha256)
# Calculate actual checksum # Calculate actual checksum
printf "%bVerifying checksum...%b\n" "$GREEN" "$NC" printf "%bVerifying checksum...%b\n" "$GREEN" "$NC"
@@ -67,7 +67,7 @@ if [ "$EXPECTED_CHECKSUM" != "$ACTUAL_CHECKSUM" ]; then
printf "Expected: %s\n" "$EXPECTED_CHECKSUM" printf "Expected: %s\n" "$EXPECTED_CHECKSUM"
printf "Got: %s\n" "$ACTUAL_CHECKSUM" printf "Got: %s\n" "$ACTUAL_CHECKSUM"
printf "The downloaded file may be corrupted or tampered with\n" printf "The downloaded file may be corrupted or tampered with\n"
cd - > /dev/null cd - >/dev/null
rm -rf "$TEMP_DIR" rm -rf "$TEMP_DIR"
exit 1 exit 1
fi fi
@@ -82,5 +82,5 @@ printf "%bRunning installer...%b\n" "$GREEN" "$NC"
./installer ./installer
# Cleanup # Cleanup
cd - > /dev/null cd - >/dev/null
rm -rf "$TEMP_DIR" rm -rf "$TEMP_DIR"
+332
View File
@@ -0,0 +1,332 @@
package clipboard
import (
"fmt"
"io"
"os"
"os/exec"
"syscall"
"github.com/AvengeMedia/DankMaterialShell/core/internal/proto/ext_data_control"
wlclient "github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/wayland/client"
)
// Copy places data on the Wayland clipboard with the given MIME type. The
// selection is served by a forked background process so it outlives the
// caller.
func Copy(data []byte, mimeType string) error {
	const foreground, pasteOnce = false, false
	return CopyOpts(data, mimeType, foreground, pasteOnce)
}
func CopyOpts(data []byte, mimeType string, foreground, pasteOnce bool) error {
if !foreground {
return copyFork(data, mimeType, pasteOnce)
}
return copyServe(data, mimeType, pasteOnce)
}
// copyFork re-executes this binary as a detached background process
// ("cl copy --foreground") and streams data to it over stdin. The child
// owns the Wayland selection and keeps serving paste requests after this
// process exits.
func copyFork(data []byte, mimeType string, pasteOnce bool) error {
	args := []string{os.Args[0], "cl", "copy", "--foreground"}
	if pasteOnce {
		args = append(args, "--paste-once")
	}
	args = append(args, "--type", mimeType)

	cmd := exec.Command(args[0], args[1:]...)
	// Detach stdio from the parent; StdinPipe below replaces Stdin.
	cmd.Stdin = nil
	cmd.Stdout = nil
	cmd.Stderr = nil
	// New session so the child survives this process and its controlling
	// terminal.
	cmd.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
	stdin, err := cmd.StdinPipe()
	if err != nil {
		return fmt.Errorf("stdin pipe: %w", err)
	}
	if err := cmd.Start(); err != nil {
		return fmt.Errorf("start: %w", err)
	}
	// NOTE(review): a payload larger than the pipe buffer blocks here until
	// the child drains it; assumed safe because the child reads stdin
	// eagerly — confirm.
	if _, err := stdin.Write(data); err != nil {
		stdin.Close()
		return fmt.Errorf("write stdin: %w", err)
	}
	stdin.Close()
	return nil
}
// copyServe takes ownership of the Wayland clipboard selection in the current
// process and serves paste requests until the selection is replaced
// (cancelled) or, when pasteOnce is set, after the first completed paste.
// Requires a compositor implementing ext_data_control_manager_v1.
func copyServe(data []byte, mimeType string, pasteOnce bool) error {
	display, err := wlclient.Connect("")
	if err != nil {
		return fmt.Errorf("wayland connect: %w", err)
	}
	defer display.Destroy()

	ctx := display.Context()
	registry, err := display.GetRegistry()
	if err != nil {
		return fmt.Errorf("get registry: %w", err)
	}
	defer registry.Destroy()

	var dataControlMgr *ext_data_control.ExtDataControlManagerV1
	var seat *wlclient.Seat
	var bindErr error
	registry.SetGlobalHandler(func(e wlclient.RegistryGlobalEvent) {
		switch e.Interface {
		case "ext_data_control_manager_v1":
			dataControlMgr = ext_data_control.NewExtDataControlManagerV1(ctx)
			if err := registry.Bind(e.Name, e.Interface, e.Version, dataControlMgr); err != nil {
				bindErr = err
			}
		case "wl_seat":
			// Only bind the first advertised seat.
			if seat != nil {
				return
			}
			seat = wlclient.NewSeat(ctx)
			if err := registry.Bind(e.Name, e.Interface, e.Version, seat); err != nil {
				bindErr = err
			}
		}
	})
	// Two roundtrips: the first delivers the globals, the second ensures the
	// bind requests issued by the handler have been processed.
	display.Roundtrip()
	display.Roundtrip()

	if bindErr != nil {
		return fmt.Errorf("registry bind: %w", bindErr)
	}
	if dataControlMgr == nil {
		return fmt.Errorf("compositor does not support ext_data_control_manager_v1")
	}
	defer dataControlMgr.Destroy()
	if seat == nil {
		return fmt.Errorf("no seat available")
	}

	device, err := dataControlMgr.GetDataDevice(seat)
	if err != nil {
		return fmt.Errorf("get data device: %w", err)
	}
	defer device.Destroy()

	source, err := dataControlMgr.CreateDataSource()
	if err != nil {
		return fmt.Errorf("create data source: %w", err)
	}
	if err := source.Offer(mimeType); err != nil {
		return fmt.Errorf("offer mime type: %w", err)
	}
	// For plain text, also advertise the common aliases and legacy X11 atoms
	// so a wide range of clients can paste.
	if mimeType == "text/plain;charset=utf-8" || mimeType == "text/plain" {
		if err := source.Offer("text/plain"); err != nil {
			return fmt.Errorf("offer text/plain: %w", err)
		}
		if err := source.Offer("text/plain;charset=utf-8"); err != nil {
			return fmt.Errorf("offer text/plain;charset=utf-8: %w", err)
		}
		if err := source.Offer("UTF8_STRING"); err != nil {
			return fmt.Errorf("offer UTF8_STRING: %w", err)
		}
		if err := source.Offer("STRING"); err != nil {
			return fmt.Errorf("offer STRING: %w", err)
		}
		if err := source.Offer("TEXT"); err != nil {
			return fmt.Errorf("offer TEXT: %w", err)
		}
	}

	cancelled := make(chan struct{})
	pasted := make(chan struct{}, 1)
	source.SetSendHandler(func(e ext_data_control.ExtDataControlSourceV1SendEvent) {
		// The compositor hands us a pipe fd; write the payload and close it.
		// NOTE(review): e.Fd is closed twice (file.Close and syscall.Close on
		// the same fd number) — harmless if nothing reuses the fd in between,
		// but worth confirming.
		defer syscall.Close(e.Fd)
		file := os.NewFile(uintptr(e.Fd), "pipe")
		defer file.Close()
		file.Write(data)
		// Non-blocking notify: only the first paste matters for pasteOnce.
		select {
		case pasted <- struct{}{}:
		default:
		}
	})
	source.SetCancelledHandler(func(e ext_data_control.ExtDataControlSourceV1CancelledEvent) {
		// Another client took the selection; we are done serving.
		close(cancelled)
	})
	if err := device.SetSelection(source); err != nil {
		return fmt.Errorf("set selection: %w", err)
	}
	display.Roundtrip()

	// Serve events until cancelled (or first paste with pasteOnce). A
	// dispatch error is treated as a normal disconnect, not a failure.
	for {
		select {
		case <-cancelled:
			return nil
		case <-pasted:
			if pasteOnce {
				return nil
			}
		default:
			if err := ctx.Dispatch(); err != nil {
				return nil
			}
		}
	}
}
// CopyText places text on the clipboard as UTF-8 plain text.
func CopyText(text string) error {
	return Copy([]byte(text), "text/plain;charset=utf-8")
}
// Paste reads the current Wayland clipboard selection. It returns the raw
// payload and the MIME type it was requested with (chosen by
// selectPreferredMimeType), or an error when the clipboard is empty or the
// compositor lacks ext_data_control_manager_v1.
func Paste() ([]byte, string, error) {
	display, err := wlclient.Connect("")
	if err != nil {
		return nil, "", fmt.Errorf("wayland connect: %w", err)
	}
	defer display.Destroy()

	ctx := display.Context()
	registry, err := display.GetRegistry()
	if err != nil {
		return nil, "", fmt.Errorf("get registry: %w", err)
	}
	defer registry.Destroy()

	var dataControlMgr *ext_data_control.ExtDataControlManagerV1
	var seat *wlclient.Seat
	var bindErr error
	registry.SetGlobalHandler(func(e wlclient.RegistryGlobalEvent) {
		switch e.Interface {
		case "ext_data_control_manager_v1":
			dataControlMgr = ext_data_control.NewExtDataControlManagerV1(ctx)
			if err := registry.Bind(e.Name, e.Interface, e.Version, dataControlMgr); err != nil {
				bindErr = err
			}
		case "wl_seat":
			// Only bind the first advertised seat.
			if seat != nil {
				return
			}
			seat = wlclient.NewSeat(ctx)
			if err := registry.Bind(e.Name, e.Interface, e.Version, seat); err != nil {
				bindErr = err
			}
		}
	})
	// First roundtrip delivers the globals; the second flushes the binds.
	display.Roundtrip()
	display.Roundtrip()

	if bindErr != nil {
		return nil, "", fmt.Errorf("registry bind: %w", bindErr)
	}
	if dataControlMgr == nil {
		return nil, "", fmt.Errorf("compositor does not support ext_data_control_manager_v1")
	}
	defer dataControlMgr.Destroy()
	if seat == nil {
		return nil, "", fmt.Errorf("no seat available")
	}

	device, err := dataControlMgr.GetDataDevice(seat)
	if err != nil {
		return nil, "", fmt.Errorf("get data device: %w", err)
	}
	defer device.Destroy()

	// Track the MIME types advertised for each incoming data offer.
	offerMimeTypes := make(map[*ext_data_control.ExtDataControlOfferV1][]string)
	device.SetDataOfferHandler(func(e ext_data_control.ExtDataControlDeviceV1DataOfferEvent) {
		if e.Id == nil {
			return
		}
		offerMimeTypes[e.Id] = nil
		e.Id.SetOfferHandler(func(me ext_data_control.ExtDataControlOfferV1OfferEvent) {
			offerMimeTypes[e.Id] = append(offerMimeTypes[e.Id], me.MimeType)
		})
	})

	var selectionOffer *ext_data_control.ExtDataControlOfferV1
	gotSelection := false
	device.SetSelectionHandler(func(e ext_data_control.ExtDataControlDeviceV1SelectionEvent) {
		// A nil Id here means the clipboard is empty.
		selectionOffer = e.Id
		gotSelection = true
	})
	// First roundtrip delivers the offer/selection events; the second lets
	// the per-offer MIME type events arrive.
	display.Roundtrip()
	display.Roundtrip()

	if !gotSelection || selectionOffer == nil {
		return nil, "", fmt.Errorf("no clipboard data")
	}

	mimeTypes := offerMimeTypes[selectionOffer]
	selectedMime := selectPreferredMimeType(mimeTypes)
	if selectedMime == "" {
		return nil, "", fmt.Errorf("no supported mime type")
	}

	r, w, err := os.Pipe()
	if err != nil {
		return nil, "", fmt.Errorf("create pipe: %w", err)
	}
	defer r.Close()
	if err := selectionOffer.Receive(selectedMime, int(w.Fd())); err != nil {
		w.Close()
		return nil, "", fmt.Errorf("receive: %w", err)
	}
	// Close our copy of the write end so ReadAll sees EOF once the source
	// client finishes writing.
	w.Close()
	// Flush the Receive request to the compositor.
	display.Roundtrip()

	data, err := io.ReadAll(r)
	if err != nil {
		return nil, "", fmt.Errorf("read: %w", err)
	}
	return data, selectedMime, nil
}
// PasteText reads the current clipboard selection and returns it as a string.
// On error the returned string is empty.
func PasteText() (string, error) {
	data, _, err := Paste()
	return string(data), err
}
// selectPreferredMimeType picks the best MIME type from mimes, preferring
// UTF-8 text, then legacy text atoms, then PNG/JPEG images. It falls back to
// the first offered type, and returns "" when mimes is empty.
func selectPreferredMimeType(mimes []string) string {
	offered := make(map[string]struct{}, len(mimes))
	for _, m := range mimes {
		offered[m] = struct{}{}
	}
	for _, want := range []string{
		"text/plain;charset=utf-8",
		"text/plain",
		"UTF8_STRING",
		"STRING",
		"TEXT",
		"image/png",
		"image/jpeg",
	} {
		if _, ok := offered[want]; ok {
			return want
		}
	}
	if len(mimes) > 0 {
		return mimes[0]
	}
	return ""
}
// IsImageMimeType reports whether mime denotes an image subtype, i.e. it
// starts with "image/" followed by at least one character.
func IsImageMimeType(mime string) bool {
	const prefix = "image/"
	return len(mime) > len(prefix) && mime[:len(prefix)] == prefix
}
+253
View File
@@ -0,0 +1,253 @@
package clipboard
import (
"bytes"
"encoding/binary"
"fmt"
"image"
_ "image/gif"
_ "image/jpeg"
_ "image/png"
"os"
"path/filepath"
"strings"
"time"
_ "golang.org/x/image/bmp"
_ "golang.org/x/image/tiff"
bolt "go.etcd.io/bbolt"
)
// StoreConfig bounds the clipboard history database.
type StoreConfig struct {
	MaxHistory   int   // maximum number of retained entries
	MaxEntrySize int64 // maximum size in bytes of a single entry
}

// DefaultStoreConfig returns the standard limits: 100 entries of at most
// 5 MiB each.
func DefaultStoreConfig() StoreConfig {
	const (
		defaultMaxHistory   = 100
		defaultMaxEntrySize = 5 << 20 // 5 MiB
	)
	return StoreConfig{
		MaxHistory:   defaultMaxHistory,
		MaxEntrySize: defaultMaxEntrySize,
	}
}
// Entry is one stored clipboard item as persisted in the bbolt bucket.
type Entry struct {
	ID        uint64    // bucket sequence number; higher means newer
	Data      []byte    // raw clipboard payload
	MimeType  string    // MIME type the payload was offered with
	Preview   string    // short human-readable summary for list UIs
	Size      int       // len(Data) at store time
	Timestamp time.Time // when the entry was stored (second precision on disk)
	IsImage   bool      // true when MimeType is an image/* type
}
// Store persists data in the clipboard history using DefaultStoreConfig
// limits.
func Store(data []byte, mimeType string) error {
	return StoreWithConfig(data, mimeType, DefaultStoreConfig())
}
// StoreWithConfig persists data in the clipboard history database, enforcing
// the limits in cfg. Empty payloads are silently ignored; oversized payloads
// return an error. Within a single write transaction it removes duplicates of
// the same payload, appends the new entry under the bucket's next sequence
// number, and trims the history to cfg.MaxHistory entries.
func StoreWithConfig(data []byte, mimeType string, cfg StoreConfig) error {
	if len(data) == 0 {
		return nil
	}
	if int64(len(data)) > cfg.MaxEntrySize {
		return fmt.Errorf("data too large: %d > %d", len(data), cfg.MaxEntrySize)
	}
	dbPath, err := getDBPath()
	if err != nil {
		return fmt.Errorf("get db path: %w", err)
	}
	// The timeout avoids blocking forever when another process holds the
	// database file lock.
	db, err := bolt.Open(dbPath, 0644, &bolt.Options{Timeout: 1 * time.Second})
	if err != nil {
		return fmt.Errorf("open db: %w", err)
	}
	defer db.Close()

	entry := Entry{
		Data:      data,
		MimeType:  mimeType,
		Size:      len(data),
		Timestamp: time.Now(),
		IsImage:   IsImageMimeType(mimeType),
	}
	// Build a human-readable preview appropriate for the payload kind.
	switch {
	case entry.IsImage:
		entry.Preview = imagePreview(data, mimeType)
	default:
		entry.Preview = textPreview(data)
	}

	return db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("clipboard"))
		if err != nil {
			return err
		}
		if err := deduplicateInTx(b, data); err != nil {
			return err
		}
		id, err := b.NextSequence()
		if err != nil {
			return err
		}
		entry.ID = id
		encoded, err := encodeEntry(entry)
		if err != nil {
			return err
		}
		if err := b.Put(itob(id), encoded); err != nil {
			return err
		}
		return trimLengthInTx(b, cfg.MaxHistory)
	})
}
// getDBPath returns the clipboard database file path, creating its parent
// directory (<user cache>/dms-clipboard, or ~/.cache/dms-clipboard as a
// fallback) with owner-only permissions.
func getDBPath() (string, error) {
	cacheDir, err := os.UserCacheDir()
	if err != nil {
		home, homeErr := os.UserHomeDir()
		if homeErr != nil {
			return "", homeErr
		}
		cacheDir = filepath.Join(home, ".cache")
	}

	dir := filepath.Join(cacheDir, "dms-clipboard")
	if mkErr := os.MkdirAll(dir, 0700); mkErr != nil {
		return "", mkErr
	}
	return filepath.Join(dir, "db"), nil
}
// deduplicateInTx removes any existing entries whose payload equals data, so
// re-copied content moves to the top of the history instead of duplicating.
// Must be called inside a writable bbolt transaction.
func deduplicateInTx(b *bolt.Bucket, data []byte) error {
	c := b.Cursor()
	for k, v := c.Last(); k != nil; k, v = c.Prev() {
		entry, err := decodeEntry(v)
		if err != nil {
			// Skip undecodable entries rather than failing the whole store.
			continue
		}
		if bytes.Equal(entry.Data, data) {
			// Delete through the cursor: bbolt requires Cursor.Delete (not
			// Bucket.Delete) when removing keys mid-iteration, otherwise the
			// cursor can skip or revisit entries.
			if err := c.Delete(); err != nil {
				return err
			}
		}
	}
	return nil
}
// trimLengthInTx keeps only the newest maxHistory entries, deleting the rest.
// It iterates from the highest (newest) key downward; must be called inside a
// writable bbolt transaction.
func trimLengthInTx(b *bolt.Bucket, maxHistory int) error {
	c := b.Cursor()
	kept := 0
	for k, _ := c.Last(); k != nil; k, _ = c.Prev() {
		if kept < maxHistory {
			kept++
			continue
		}
		// Cursor.Delete is the safe way to remove keys during iteration;
		// Bucket.Delete can desynchronize the cursor and skip entries.
		if err := c.Delete(); err != nil {
			return err
		}
	}
	return nil
}
// encodeEntry serializes e into the fixed binary layout consumed by
// decodeEntry: id(u64) | len32+data | len32+mime | len32+preview | size(i32)
// | unix-seconds(i64) | isImage(byte). All integers are big-endian. Writes to
// a bytes.Buffer cannot fail, so their errors are intentionally ignored.
func encodeEntry(e Entry) ([]byte, error) {
	var out bytes.Buffer

	// Write a length-prefixed byte slice.
	writeBytes := func(p []byte) {
		binary.Write(&out, binary.BigEndian, uint32(len(p)))
		out.Write(p)
	}

	binary.Write(&out, binary.BigEndian, e.ID)
	writeBytes(e.Data)
	writeBytes([]byte(e.MimeType))
	writeBytes([]byte(e.Preview))
	binary.Write(&out, binary.BigEndian, int32(e.Size))
	binary.Write(&out, binary.BigEndian, e.Timestamp.Unix())

	flag := byte(0)
	if e.IsImage {
		flag = 1
	}
	out.WriteByte(flag)

	return out.Bytes(), nil
}
// decodeEntry parses the binary layout produced by encodeEntry. Every read is
// checked so truncated or corrupt records return an error instead of a
// silently zeroed Entry (the previous version ignored all read errors), and a
// corrupt length field cannot trigger an oversized allocation.
func decodeEntry(data []byte) (Entry, error) {
	buf := bytes.NewReader(data)
	var e Entry

	if err := binary.Read(buf, binary.BigEndian, &e.ID); err != nil {
		return Entry{}, fmt.Errorf("decode id: %w", err)
	}

	// readBytes reads one length-prefixed field, validating the length
	// against the remaining input before allocating.
	readBytes := func(field string) ([]byte, error) {
		var n uint32
		if err := binary.Read(buf, binary.BigEndian, &n); err != nil {
			return nil, fmt.Errorf("decode %s length: %w", field, err)
		}
		if int(n) > buf.Len() {
			return nil, fmt.Errorf("decode %s: length %d exceeds remaining %d bytes", field, n, buf.Len())
		}
		p := make([]byte, n)
		if n > 0 {
			if _, err := buf.Read(p); err != nil {
				return nil, fmt.Errorf("decode %s: %w", field, err)
			}
		}
		return p, nil
	}

	var err error
	if e.Data, err = readBytes("data"); err != nil {
		return Entry{}, err
	}
	mime, err := readBytes("mime")
	if err != nil {
		return Entry{}, err
	}
	e.MimeType = string(mime)
	preview, err := readBytes("preview")
	if err != nil {
		return Entry{}, err
	}
	e.Preview = string(preview)

	var size int32
	if err := binary.Read(buf, binary.BigEndian, &size); err != nil {
		return Entry{}, fmt.Errorf("decode size: %w", err)
	}
	e.Size = int(size)

	var timestamp int64
	if err := binary.Read(buf, binary.BigEndian, &timestamp); err != nil {
		return Entry{}, fmt.Errorf("decode timestamp: %w", err)
	}
	e.Timestamp = time.Unix(timestamp, 0)

	var isImage byte
	if err := binary.Read(buf, binary.BigEndian, &isImage); err != nil {
		return Entry{}, fmt.Errorf("decode image flag: %w", err)
	}
	e.IsImage = isImage == 1
	return e, nil
}
// itob encodes v as an 8-byte big-endian key, matching bbolt's NextSequence
// ordering so keys sort chronologically.
func itob(v uint64) []byte {
	var key [8]byte
	binary.BigEndian.PutUint64(key[:], v)
	return key[:]
}
func textPreview(data []byte) string {
text := string(data)
text = strings.TrimSpace(text)
text = strings.Join(strings.Fields(text), " ")
if len(text) > 100 {
return text[:100] + "…"
}
return text
}
// imagePreview builds a bracketed summary of image clipboard data, including
// pixel dimensions when the header decodes, e.g. "[[ image 12 KiB png
// 640x480 ]]". Falls back to the caller-supplied format name when decoding
// fails.
func imagePreview(data []byte, format string) string {
	cfg, decodedFormat, err := image.DecodeConfig(bytes.NewReader(data))
	if err == nil {
		return fmt.Sprintf("[[ image %s %s %dx%d ]]", sizeStr(len(data)), decodedFormat, cfg.Width, cfg.Height)
	}
	return fmt.Sprintf("[[ image %s %s ]]", sizeStr(len(data)), format)
}
// sizeStr formats a byte count as a whole number with a binary unit suffix
// (B, KiB, MiB), capping at MiB.
func sizeStr(size int) string {
	labels := [...]string{"B", "KiB", "MiB"}
	value := float64(size)
	idx := 0
	for value >= 1024 && idx+1 < len(labels) {
		value /= 1024
		idx++
	}
	return fmt.Sprintf("%.0f %s", value, labels[idx])
}
+160
View File
@@ -0,0 +1,160 @@
package clipboard
import (
"context"
"fmt"
"io"
"os"
"time"
"github.com/AvengeMedia/DankMaterialShell/core/internal/proto/ext_data_control"
wlclient "github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/wayland/client"
)
// ClipboardChange describes one clipboard update delivered by WatchChan.
type ClipboardChange struct {
	Data     []byte // raw clipboard payload
	MimeType string // MIME type the payload was read as
}
// Watch subscribes to Wayland clipboard changes and invokes callback with the
// payload and chosen MIME type for every new selection. It blocks until ctx
// is cancelled (returning ctx.Err()) or a Wayland error occurs. Requires a
// compositor implementing ext_data_control_manager_v1.
func Watch(ctx context.Context, callback func(data []byte, mimeType string)) error {
	display, err := wlclient.Connect("")
	if err != nil {
		return fmt.Errorf("wayland connect: %w", err)
	}
	defer display.Destroy()

	wlCtx := display.Context()
	registry, err := display.GetRegistry()
	if err != nil {
		return fmt.Errorf("get registry: %w", err)
	}
	defer registry.Destroy()

	var dataControlMgr *ext_data_control.ExtDataControlManagerV1
	var seat *wlclient.Seat
	var bindErr error
	registry.SetGlobalHandler(func(e wlclient.RegistryGlobalEvent) {
		switch e.Interface {
		case "ext_data_control_manager_v1":
			dataControlMgr = ext_data_control.NewExtDataControlManagerV1(wlCtx)
			if err := registry.Bind(e.Name, e.Interface, e.Version, dataControlMgr); err != nil {
				bindErr = err
			}
		case "wl_seat":
			// Only bind the first advertised seat.
			if seat != nil {
				return
			}
			seat = wlclient.NewSeat(wlCtx)
			if err := registry.Bind(e.Name, e.Interface, e.Version, seat); err != nil {
				bindErr = err
			}
		}
	})
	// Two roundtrips: the first delivers the globals, the second flushes the
	// bind requests issued by the handler.
	display.Roundtrip()
	display.Roundtrip()

	if bindErr != nil {
		return fmt.Errorf("registry bind: %w", bindErr)
	}
	if dataControlMgr == nil {
		return fmt.Errorf("compositor does not support ext_data_control_manager_v1")
	}
	defer dataControlMgr.Destroy()
	if seat == nil {
		return fmt.Errorf("no seat available")
	}

	device, err := dataControlMgr.GetDataDevice(seat)
	if err != nil {
		return fmt.Errorf("get data device: %w", err)
	}
	defer device.Destroy()

	// Track the MIME types advertised for each incoming data offer.
	offerMimeTypes := make(map[*ext_data_control.ExtDataControlOfferV1][]string)
	device.SetDataOfferHandler(func(e ext_data_control.ExtDataControlDeviceV1DataOfferEvent) {
		if e.Id == nil {
			return
		}
		offerMimeTypes[e.Id] = nil
		e.Id.SetOfferHandler(func(me ext_data_control.ExtDataControlOfferV1OfferEvent) {
			offerMimeTypes[e.Id] = append(offerMimeTypes[e.Id], me.MimeType)
		})
	})
	device.SetSelectionHandler(func(e ext_data_control.ExtDataControlDeviceV1SelectionEvent) {
		// A nil offer means the clipboard was cleared; nothing to read.
		if e.Id == nil {
			return
		}
		mimes := offerMimeTypes[e.Id]
		selectedMime := selectPreferredMimeType(mimes)
		if selectedMime == "" {
			return
		}
		r, w, err := os.Pipe()
		if err != nil {
			return
		}
		if err := e.Id.Receive(selectedMime, int(w.Fd())); err != nil {
			w.Close()
			r.Close()
			return
		}
		// Close the local write end so the reader sees EOF when the source
		// client finishes writing.
		w.Close()
		// Read asynchronously so a slow source client cannot stall the
		// event loop below.
		go func() {
			defer r.Close()
			data, err := io.ReadAll(r)
			if err != nil || len(data) == 0 {
				return
			}
			callback(data, selectedMime)
		}()
	})
	display.Roundtrip()
	display.Roundtrip()

	// Poll with a short read deadline so ctx cancellation is observed at
	// least every 100ms; a deadline timeout is expected and not an error.
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
			if err := wlCtx.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil {
				return fmt.Errorf("set read deadline: %w", err)
			}
			if err := wlCtx.Dispatch(); err != nil && err != os.ErrDeadlineExceeded {
				return fmt.Errorf("dispatch: %w", err)
			}
		}
	}
}
// WatchChan adapts Watch to channels: ch receives clipboard changes (dropping
// updates when its buffer of 16 is full) and errCh receives at most one
// terminal error. Both channels are closed when the watcher stops.
func WatchChan(ctx context.Context) (<-chan ClipboardChange, <-chan error) {
	ch := make(chan ClipboardChange, 16)
	errCh := make(chan error, 1)
	go func() {
		defer close(ch)
		err := Watch(ctx, func(data []byte, mimeType string) {
			// Drop rather than block when the consumer is slow.
			select {
			case ch <- ClipboardChange{Data: data, MimeType: mimeType}:
			default:
			}
		})
		if err != nil && err != context.Canceled {
			errCh <- err
		}
		close(errCh)
	}()
	// NOTE(review): this sleep gives the watcher goroutine a head start so an
	// immediate connect failure can surface on errCh before the caller
	// proceeds; it is best-effort, not a synchronization guarantee — confirm
	// intent.
	time.Sleep(50 * time.Millisecond)
	return ch, errCh
}
+314
View File
@@ -0,0 +1,314 @@
package colorpicker
import (
"sync"
"testing"
"github.com/stretchr/testify/assert"
)
// TestSurfaceState_ConcurrentPointerMotion hammers OnPointerMotion from many
// goroutines; meaningful when run with -race to surface unsynchronized access.
func TestSurfaceState_ConcurrentPointerMotion(t *testing.T) {
	s := NewSurfaceState(FormatHex, false)
	var wg sync.WaitGroup
	const goroutines = 50
	const iterations = 100
	for i := 0; i < goroutines; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				s.OnPointerMotion(float64(id*10+j), float64(id*10+j))
			}
		}(i)
	}
	wg.Wait()
}

// TestSurfaceState_ConcurrentScaleAccess mixes SetScale writers with Scale
// readers, checking the accessor is race-free and never reports below 1.
func TestSurfaceState_ConcurrentScaleAccess(t *testing.T) {
	s := NewSurfaceState(FormatHex, false)
	var wg sync.WaitGroup
	const goroutines = 30
	const iterations = 100
	// Writer half: cycle through scales 1..3.
	for i := 0; i < goroutines/2; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				s.SetScale(int32(id%3 + 1))
			}
		}(i)
	}
	// Reader half: every observed scale must be >= 1.
	for i := 0; i < goroutines/2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				scale := s.Scale()
				assert.GreaterOrEqual(t, scale, int32(1))
			}
		}()
	}
	wg.Wait()
}
// TestSurfaceState_ConcurrentLogicalSize interleaves OnLayerConfigure writers
// with LogicalSize readers to exercise size-state synchronization.
func TestSurfaceState_ConcurrentLogicalSize(t *testing.T) {
	s := NewSurfaceState(FormatHex, false)
	var wg sync.WaitGroup
	const goroutines = 20
	const iterations = 100
	for i := 0; i < goroutines/2; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				_ = s.OnLayerConfigure(1920+id, 1080+j)
			}
		}(i)
	}
	for i := 0; i < goroutines/2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				w, h := s.LogicalSize()
				_ = w
				_ = h
			}
		}()
	}
	wg.Wait()
}

// TestSurfaceState_ConcurrentIsDone mixes pointer-button and key input with
// IsDone readers: the done flags must be safely readable during input.
func TestSurfaceState_ConcurrentIsDone(t *testing.T) {
	s := NewSurfaceState(FormatHex, false)
	var wg sync.WaitGroup
	const goroutines = 30
	const iterations = 100
	// 0x110 is BTN_LEFT in the Linux input event codes.
	for i := 0; i < goroutines/3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				s.OnPointerButton(0x110, 1)
			}
		}()
	}
	for i := 0; i < goroutines/3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				s.OnKey(1, 1)
			}
		}()
	}
	for i := 0; i < goroutines/3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				picked, cancelled := s.IsDone()
				_ = picked
				_ = cancelled
			}
		}()
	}
	wg.Wait()
}

// TestSurfaceState_ConcurrentIsReady checks IsReady can be polled from many
// goroutines simultaneously.
func TestSurfaceState_ConcurrentIsReady(t *testing.T) {
	s := NewSurfaceState(FormatHex, false)
	var wg sync.WaitGroup
	const goroutines = 20
	const iterations = 100
	for i := 0; i < goroutines; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				_ = s.IsReady()
			}
		}()
	}
	wg.Wait()
}

// TestSurfaceState_ConcurrentSwapBuffers checks SwapBuffers is safe under
// concurrent invocation.
func TestSurfaceState_ConcurrentSwapBuffers(t *testing.T) {
	s := NewSurfaceState(FormatHex, false)
	var wg sync.WaitGroup
	const goroutines = 20
	const iterations = 100
	for i := 0; i < goroutines; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				s.SwapBuffers()
			}
		}()
	}
	wg.Wait()
}
// TestSurfaceState_ZeroScale: a zero scale is clamped up to 1.
func TestSurfaceState_ZeroScale(t *testing.T) {
	s := NewSurfaceState(FormatHex, false)
	s.SetScale(0)
	assert.Equal(t, int32(1), s.Scale())
}

// TestSurfaceState_NegativeScale: negative scales are also clamped to 1.
func TestSurfaceState_NegativeScale(t *testing.T) {
	s := NewSurfaceState(FormatHex, false)
	s.SetScale(-5)
	assert.Equal(t, int32(1), s.Scale())
}

// TestSurfaceState_ZeroDimensionConfigure: configures with a zero or negative
// dimension are accepted without error but do not update the logical size.
func TestSurfaceState_ZeroDimensionConfigure(t *testing.T) {
	s := NewSurfaceState(FormatHex, false)
	err := s.OnLayerConfigure(0, 100)
	assert.NoError(t, err)
	err = s.OnLayerConfigure(100, 0)
	assert.NoError(t, err)
	err = s.OnLayerConfigure(-1, 100)
	assert.NoError(t, err)
	w, h := s.LogicalSize()
	assert.Equal(t, 0, w)
	assert.Equal(t, 0, h)
}

// TestSurfaceState_PickColorNilBuffer: picking before any buffer exists
// reports failure and a zero color.
func TestSurfaceState_PickColorNilBuffer(t *testing.T) {
	s := NewSurfaceState(FormatHex, false)
	color, ok := s.PickColor()
	assert.False(t, ok)
	assert.Equal(t, Color{}, color)
}

// TestSurfaceState_RedrawNilBuffer: redraw with no buffer yields nil.
func TestSurfaceState_RedrawNilBuffer(t *testing.T) {
	s := NewSurfaceState(FormatHex, false)
	buf := s.Redraw()
	assert.Nil(t, buf)
}

// TestSurfaceState_RedrawScreenOnlyNilBuffer: screen-only redraw with no
// buffer yields nil.
func TestSurfaceState_RedrawScreenOnlyNilBuffer(t *testing.T) {
	s := NewSurfaceState(FormatHex, false)
	buf := s.RedrawScreenOnly()
	assert.Nil(t, buf)
}

// TestSurfaceState_FrontRenderBufferNil: no front render buffer before
// initialization.
func TestSurfaceState_FrontRenderBufferNil(t *testing.T) {
	s := NewSurfaceState(FormatHex, false)
	buf := s.FrontRenderBuffer()
	assert.Nil(t, buf)
}

// TestSurfaceState_ScreenBufferNil: no screen buffer before initialization.
func TestSurfaceState_ScreenBufferNil(t *testing.T) {
	s := NewSurfaceState(FormatHex, false)
	buf := s.ScreenBuffer()
	assert.Nil(t, buf)
}

// TestSurfaceState_DestroyMultipleTimes: Destroy must be idempotent.
func TestSurfaceState_DestroyMultipleTimes(t *testing.T) {
	s := NewSurfaceState(FormatHex, false)
	s.Destroy()
	s.Destroy()
}
// TestClamp covers the integer clamp helper, including both boundary values.
func TestClamp(t *testing.T) {
	tests := []struct {
		v, lo, hi, expected int
	}{
		{5, 0, 10, 5},
		{-5, 0, 10, 0},
		{15, 0, 10, 10},
		{0, 0, 10, 0},
		{10, 0, 10, 10},
	}
	for _, tt := range tests {
		result := clamp(tt.v, tt.lo, tt.hi)
		assert.Equal(t, tt.expected, result)
	}
}

// TestClampF covers the float clamp helper, comparing with a tolerance.
func TestClampF(t *testing.T) {
	tests := []struct {
		v, lo, hi, expected float64
	}{
		{5.0, 0.0, 10.0, 5.0},
		{-5.0, 0.0, 10.0, 0.0},
		{15.0, 0.0, 10.0, 10.0},
		{0.0, 0.0, 10.0, 0.0},
		{10.0, 0.0, 10.0, 10.0},
	}
	for _, tt := range tests {
		result := clampF(tt.v, tt.lo, tt.hi)
		assert.InDelta(t, tt.expected, result, 0.001)
	}
}

// TestAbs covers the integer absolute-value helper.
func TestAbs(t *testing.T) {
	tests := []struct {
		v, expected int
	}{
		{5, 5},
		{-5, 5},
		{0, 0},
	}
	for _, tt := range tests {
		result := abs(tt.v)
		assert.Equal(t, tt.expected, result)
	}
}

// TestBlendColors checks linear interpolation between two colors at t=0
// (pure background), t=1 (pure foreground), t=0.5 (midpoint within ±1), and
// that out-of-range factors clamp to the endpoints.
func TestBlendColors(t *testing.T) {
	bg := Color{R: 0, G: 0, B: 0, A: 255}
	fg := Color{R: 255, G: 255, B: 255, A: 255}
	result := blendColors(bg, fg, 0.0)
	assert.Equal(t, bg.R, result.R)
	assert.Equal(t, bg.G, result.G)
	assert.Equal(t, bg.B, result.B)
	result = blendColors(bg, fg, 1.0)
	assert.Equal(t, fg.R, result.R)
	assert.Equal(t, fg.G, result.G)
	assert.Equal(t, fg.B, result.B)
	result = blendColors(bg, fg, 0.5)
	assert.InDelta(t, 127, int(result.R), 1)
	assert.InDelta(t, 127, int(result.G), 1)
	assert.InDelta(t, 127, int(result.B), 1)
	result = blendColors(bg, fg, -1.0)
	assert.Equal(t, bg.R, result.R)
	result = blendColors(bg, fg, 2.0)
	assert.Equal(t, fg.R, result.R)
}
+5 -4
View File
@@ -615,10 +615,11 @@ func (cd *ConfigDeployer) transformNiriConfigForNonSystemd(config, terminalComma
spawnDms := `spawn-at-startup "dms" "run"` spawnDms := `spawn-at-startup "dms" "run"`
if !strings.Contains(config, spawnDms) { if !strings.Contains(config, spawnDms) {
config = strings.Replace(config, // Insert spawn-at-startup for dms after the environment block
`spawn-at-startup "bash" "-c" "wl-paste --watch cliphist store &"`, envBlockEnd := regexp.MustCompile(`environment \{[^}]*\}`)
`spawn-at-startup "bash" "-c" "wl-paste --watch cliphist store &"`+"\n"+spawnDms, if loc := envBlockEnd.FindStringIndex(config); loc != nil {
1) config = config[:loc[1]] + "\n" + spawnDms + config[loc[1]:]
}
} }
return config return config
+2 -4
View File
@@ -5,15 +5,13 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
"github.com/AvengeMedia/DankMaterialShell/core/internal/utils"
) )
func LocateDMSConfig() (string, error) { func LocateDMSConfig() (string, error) {
var primaryPaths []string var primaryPaths []string
configHome := utils.XDGConfigHome() configHome, err := os.UserConfigDir()
if configHome != "" { if err == nil && configHome != "" {
primaryPaths = append(primaryPaths, filepath.Join(configHome, "quickshell", "dms")) primaryPaths = append(primaryPaths, filepath.Join(configHome, "quickshell", "dms"))
} }
@@ -12,7 +12,6 @@ monitor = , preferred,auto,auto
# ================== # ==================
exec-once = dbus-update-activation-environment --systemd --all exec-once = dbus-update-activation-environment --systemd --all
exec-once = systemctl --user start hyprland-session.target exec-once = systemctl --user start hyprland-session.target
exec-once = bash -c "wl-paste --watch cliphist store &"
# ================== # ==================
# INPUT CONFIG # INPUT CONFIG
-1
View File
@@ -109,7 +109,6 @@ overview {
// which may be more convenient to use. // which may be more convenient to use.
// See the binds section below for more spawn examples. // See the binds section below for more spawn examples.
// This line starts waybar, a commonly used bar for Wayland compositors. // This line starts waybar, a commonly used bar for Wayland compositors.
spawn-at-startup "bash" "-c" "wl-paste --watch cliphist store &"
environment { environment {
XDG_CURRENT_DESKTOP "niri" XDG_CURRENT_DESKTOP "niri"
} }
+1 -5
View File
@@ -103,10 +103,8 @@ func (a *ArchDistribution) DetectDependenciesWithTerminal(ctx context.Context, w
dependencies = append(dependencies, a.detectXwaylandSatellite()) dependencies = append(dependencies, a.detectXwaylandSatellite())
} }
// Base detections (common across distros)
dependencies = append(dependencies, a.detectMatugen()) dependencies = append(dependencies, a.detectMatugen())
dependencies = append(dependencies, a.detectDgop()) dependencies = append(dependencies, a.detectDgop())
dependencies = append(dependencies, a.detectClipboardTools()...)
return dependencies, nil return dependencies, nil
} }
@@ -139,8 +137,6 @@ func (a *ArchDistribution) GetPackageMappingWithVariants(wm deps.WindowManager,
"ghostty": {Name: "ghostty", Repository: RepoTypeSystem}, "ghostty": {Name: "ghostty", Repository: RepoTypeSystem},
"kitty": {Name: "kitty", Repository: RepoTypeSystem}, "kitty": {Name: "kitty", Repository: RepoTypeSystem},
"alacritty": {Name: "alacritty", Repository: RepoTypeSystem}, "alacritty": {Name: "alacritty", Repository: RepoTypeSystem},
"cliphist": {Name: "cliphist", Repository: RepoTypeSystem},
"wl-clipboard": {Name: "wl-clipboard", Repository: RepoTypeSystem},
"xdg-desktop-portal-gtk": {Name: "xdg-desktop-portal-gtk", Repository: RepoTypeSystem}, "xdg-desktop-portal-gtk": {Name: "xdg-desktop-portal-gtk", Repository: RepoTypeSystem},
"accountsservice": {Name: "accountsservice", Repository: RepoTypeSystem}, "accountsservice": {Name: "accountsservice", Repository: RepoTypeSystem},
} }
@@ -344,7 +340,7 @@ func (a *ArchDistribution) InstallPackages(ctx context.Context, dependencies []d
a.log(fmt.Sprintf("Warning: failed to write window manager config: %v", err)) a.log(fmt.Sprintf("Warning: failed to write window manager config: %v", err))
} }
if err := a.EnableDMSService(ctx); err != nil { if err := a.EnableDMSService(ctx, wm); err != nil {
a.log(fmt.Sprintf("Warning: failed to enable dms service: %v", err)) a.log(fmt.Sprintf("Warning: failed to enable dms service: %v", err))
} }
+13 -32
View File
@@ -185,37 +185,6 @@ func (b *BaseDistribution) detectSpecificTerminal(terminal deps.Terminal) deps.D
} }
} }
func (b *BaseDistribution) detectClipboardTools() []deps.Dependency {
var dependencies []deps.Dependency
cliphist := deps.StatusMissing
if b.commandExists("cliphist") {
cliphist = deps.StatusInstalled
}
wlClipboard := deps.StatusMissing
if b.commandExists("wl-copy") && b.commandExists("wl-paste") {
wlClipboard = deps.StatusInstalled
}
dependencies = append(dependencies,
deps.Dependency{
Name: "cliphist",
Status: cliphist,
Description: "Wayland clipboard manager",
Required: true,
},
deps.Dependency{
Name: "wl-clipboard",
Status: wlClipboard,
Description: "Wayland clipboard utilities",
Required: true,
},
)
return dependencies
}
func (b *BaseDistribution) detectHyprlandTools() []deps.Dependency { func (b *BaseDistribution) detectHyprlandTools() []deps.Dependency {
var dependencies []deps.Dependency var dependencies []deps.Dependency
@@ -597,12 +566,24 @@ TERMINAL=%s
return nil return nil
} }
func (b *BaseDistribution) EnableDMSService(ctx context.Context) error { func (b *BaseDistribution) EnableDMSService(ctx context.Context, wm deps.WindowManager) error {
cmd := exec.CommandContext(ctx, "systemctl", "--user", "enable", "--now", "dms") cmd := exec.CommandContext(ctx, "systemctl", "--user", "enable", "--now", "dms")
if err := cmd.Run(); err != nil { if err := cmd.Run(); err != nil {
return fmt.Errorf("failed to enable dms service: %w", err) return fmt.Errorf("failed to enable dms service: %w", err)
} }
b.log("Enabled dms systemd user service") b.log("Enabled dms systemd user service")
switch wm {
case deps.WindowManagerNiri:
if err := exec.CommandContext(ctx, "systemctl", "--user", "add-wants", "niri.service", "dms").Run(); err != nil {
b.log("Warning: failed to add dms as a want for niri.service")
}
case deps.WindowManagerHyprland:
if err := exec.CommandContext(ctx, "systemctl", "--user", "add-wants", "hyprland-session.target", "dms").Run(); err != nil {
b.log("Warning: failed to add dms as a want for hyprland-session.target")
}
}
return nil return nil
} }
+2 -5
View File
@@ -69,7 +69,6 @@ func (d *DebianDistribution) DetectDependenciesWithTerminal(ctx context.Context,
dependencies = append(dependencies, d.detectMatugen()) dependencies = append(dependencies, d.detectMatugen())
dependencies = append(dependencies, d.detectDgop()) dependencies = append(dependencies, d.detectDgop())
dependencies = append(dependencies, d.detectClipboardTools()...)
return dependencies, nil return dependencies, nil
} }
@@ -102,7 +101,6 @@ func (d *DebianDistribution) GetPackageMappingWithVariants(wm deps.WindowManager
"git": {Name: "git", Repository: RepoTypeSystem}, "git": {Name: "git", Repository: RepoTypeSystem},
"kitty": {Name: "kitty", Repository: RepoTypeSystem}, "kitty": {Name: "kitty", Repository: RepoTypeSystem},
"alacritty": {Name: "alacritty", Repository: RepoTypeSystem}, "alacritty": {Name: "alacritty", Repository: RepoTypeSystem},
"wl-clipboard": {Name: "wl-clipboard", Repository: RepoTypeSystem},
"xdg-desktop-portal-gtk": {Name: "xdg-desktop-portal-gtk", Repository: RepoTypeSystem}, "xdg-desktop-portal-gtk": {Name: "xdg-desktop-portal-gtk", Repository: RepoTypeSystem},
"accountsservice": {Name: "accountsservice", Repository: RepoTypeSystem}, "accountsservice": {Name: "accountsservice", Repository: RepoTypeSystem},
@@ -111,7 +109,6 @@ func (d *DebianDistribution) GetPackageMappingWithVariants(wm deps.WindowManager
"quickshell": d.getQuickshellMapping(variants["quickshell"]), "quickshell": d.getQuickshellMapping(variants["quickshell"]),
"matugen": {Name: "matugen", Repository: RepoTypeOBS, RepoURL: "home:AvengeMedia:danklinux"}, "matugen": {Name: "matugen", Repository: RepoTypeOBS, RepoURL: "home:AvengeMedia:danklinux"},
"dgop": {Name: "dgop", Repository: RepoTypeOBS, RepoURL: "home:AvengeMedia:danklinux"}, "dgop": {Name: "dgop", Repository: RepoTypeOBS, RepoURL: "home:AvengeMedia:danklinux"},
"cliphist": {Name: "cliphist", Repository: RepoTypeOBS, RepoURL: "home:AvengeMedia:danklinux"},
"ghostty": {Name: "ghostty", Repository: RepoTypeOBS, RepoURL: "home:AvengeMedia:danklinux"}, "ghostty": {Name: "ghostty", Repository: RepoTypeOBS, RepoURL: "home:AvengeMedia:danklinux"},
} }
@@ -312,7 +309,7 @@ func (d *DebianDistribution) InstallPackages(ctx context.Context, dependencies [
d.log(fmt.Sprintf("Warning: failed to write window manager config: %v", err)) d.log(fmt.Sprintf("Warning: failed to write window manager config: %v", err))
} }
if err := d.EnableDMSService(ctx); err != nil { if err := d.EnableDMSService(ctx, wm); err != nil {
d.log(fmt.Sprintf("Warning: failed to enable dms service: %v", err)) d.log(fmt.Sprintf("Warning: failed to enable dms service: %v", err))
} }
@@ -549,7 +546,7 @@ func (d *DebianDistribution) installBuildDependencies(ctx context.Context, manua
if err := d.installRust(ctx, sudoPassword, progressChan); err != nil { if err := d.installRust(ctx, sudoPassword, progressChan); err != nil {
return fmt.Errorf("failed to install Rust: %w", err) return fmt.Errorf("failed to install Rust: %w", err)
} }
case "cliphist", "dgop": case "dgop":
if err := d.installGo(ctx, sudoPassword, progressChan); err != nil { if err := d.installGo(ctx, sudoPassword, progressChan); err != nil {
return fmt.Errorf("failed to install Go: %w", err) return fmt.Errorf("failed to install Go: %w", err)
} }
+1 -5
View File
@@ -88,10 +88,8 @@ func (f *FedoraDistribution) DetectDependenciesWithTerminal(ctx context.Context,
dependencies = append(dependencies, f.detectXwaylandSatellite()) dependencies = append(dependencies, f.detectXwaylandSatellite())
} }
// Base detections (common across distros)
dependencies = append(dependencies, f.detectMatugen()) dependencies = append(dependencies, f.detectMatugen())
dependencies = append(dependencies, f.detectDgop()) dependencies = append(dependencies, f.detectDgop())
dependencies = append(dependencies, f.detectClipboardTools()...)
return dependencies, nil return dependencies, nil
} }
@@ -117,14 +115,12 @@ func (f *FedoraDistribution) GetPackageMappingWithVariants(wm deps.WindowManager
"ghostty": {Name: "ghostty", Repository: RepoTypeCOPR, RepoURL: "avengemedia/danklinux"}, "ghostty": {Name: "ghostty", Repository: RepoTypeCOPR, RepoURL: "avengemedia/danklinux"},
"kitty": {Name: "kitty", Repository: RepoTypeSystem}, "kitty": {Name: "kitty", Repository: RepoTypeSystem},
"alacritty": {Name: "alacritty", Repository: RepoTypeSystem}, "alacritty": {Name: "alacritty", Repository: RepoTypeSystem},
"wl-clipboard": {Name: "wl-clipboard", Repository: RepoTypeSystem},
"xdg-desktop-portal-gtk": {Name: "xdg-desktop-portal-gtk", Repository: RepoTypeSystem}, "xdg-desktop-portal-gtk": {Name: "xdg-desktop-portal-gtk", Repository: RepoTypeSystem},
"accountsservice": {Name: "accountsservice", Repository: RepoTypeSystem}, "accountsservice": {Name: "accountsservice", Repository: RepoTypeSystem},
// COPR packages // COPR packages
"quickshell": f.getQuickshellMapping(variants["quickshell"]), "quickshell": f.getQuickshellMapping(variants["quickshell"]),
"matugen": {Name: "matugen", Repository: RepoTypeCOPR, RepoURL: "avengemedia/danklinux"}, "matugen": {Name: "matugen", Repository: RepoTypeCOPR, RepoURL: "avengemedia/danklinux"},
"cliphist": {Name: "cliphist", Repository: RepoTypeCOPR, RepoURL: "avengemedia/danklinux"},
"dms (DankMaterialShell)": f.getDmsMapping(variants["dms (DankMaterialShell)"]), "dms (DankMaterialShell)": f.getDmsMapping(variants["dms (DankMaterialShell)"]),
"dgop": {Name: "dgop", Repository: RepoTypeCOPR, RepoURL: "avengemedia/danklinux"}, "dgop": {Name: "dgop", Repository: RepoTypeCOPR, RepoURL: "avengemedia/danklinux"},
} }
@@ -353,7 +349,7 @@ func (f *FedoraDistribution) InstallPackages(ctx context.Context, dependencies [
f.log(fmt.Sprintf("Warning: failed to write window manager config: %v", err)) f.log(fmt.Sprintf("Warning: failed to write window manager config: %v", err))
} }
if err := f.EnableDMSService(ctx); err != nil { if err := f.EnableDMSService(ctx, wm); err != nil {
f.log(fmt.Sprintf("Warning: failed to enable dms service: %v", err)) f.log(fmt.Sprintf("Warning: failed to enable dms service: %v", err))
} }
+1 -4
View File
@@ -107,7 +107,6 @@ func (g *GentooDistribution) DetectDependenciesWithTerminal(ctx context.Context,
dependencies = append(dependencies, g.detectMatugen()) dependencies = append(dependencies, g.detectMatugen())
dependencies = append(dependencies, g.detectDgop()) dependencies = append(dependencies, g.detectDgop())
dependencies = append(dependencies, g.detectClipboardTools()...)
return dependencies, nil return dependencies, nil
} }
@@ -140,7 +139,6 @@ func (g *GentooDistribution) GetPackageMappingWithVariants(wm deps.WindowManager
"git": {Name: "dev-vcs/git", Repository: RepoTypeSystem}, "git": {Name: "dev-vcs/git", Repository: RepoTypeSystem},
"kitty": {Name: "x11-terms/kitty", Repository: RepoTypeSystem, UseFlags: "X wayland"}, "kitty": {Name: "x11-terms/kitty", Repository: RepoTypeSystem, UseFlags: "X wayland"},
"alacritty": {Name: "x11-terms/alacritty", Repository: RepoTypeSystem, UseFlags: "X wayland"}, "alacritty": {Name: "x11-terms/alacritty", Repository: RepoTypeSystem, UseFlags: "X wayland"},
"wl-clipboard": {Name: "gui-apps/wl-clipboard", Repository: RepoTypeSystem},
"xdg-desktop-portal-gtk": {Name: "sys-apps/xdg-desktop-portal-gtk", Repository: RepoTypeSystem, UseFlags: "wayland X"}, "xdg-desktop-portal-gtk": {Name: "sys-apps/xdg-desktop-portal-gtk", Repository: RepoTypeSystem, UseFlags: "wayland X"},
"accountsservice": {Name: "sys-apps/accountsservice", Repository: RepoTypeSystem}, "accountsservice": {Name: "sys-apps/accountsservice", Repository: RepoTypeSystem},
@@ -151,7 +149,6 @@ func (g *GentooDistribution) GetPackageMappingWithVariants(wm deps.WindowManager
"quickshell": g.getQuickshellMapping(variants["quickshell"]), "quickshell": g.getQuickshellMapping(variants["quickshell"]),
"matugen": {Name: "x11-misc/matugen", Repository: RepoTypeGURU, AcceptKeywords: archKeyword}, "matugen": {Name: "x11-misc/matugen", Repository: RepoTypeGURU, AcceptKeywords: archKeyword},
"cliphist": {Name: "app-misc/cliphist", Repository: RepoTypeGURU, AcceptKeywords: archKeyword},
"dms (DankMaterialShell)": g.getDmsMapping(variants["dms (DankMaterialShell)"]), "dms (DankMaterialShell)": g.getDmsMapping(variants["dms (DankMaterialShell)"]),
"dgop": {Name: "dgop", Repository: RepoTypeManual, BuildFunc: "installDgop"}, "dgop": {Name: "dgop", Repository: RepoTypeManual, BuildFunc: "installDgop"},
} }
@@ -410,7 +407,7 @@ func (g *GentooDistribution) InstallPackages(ctx context.Context, dependencies [
g.log(fmt.Sprintf("Warning: failed to write window manager config: %v", err)) g.log(fmt.Sprintf("Warning: failed to write window manager config: %v", err))
} }
if err := g.EnableDMSService(ctx); err != nil { if err := g.EnableDMSService(ctx, wm); err != nil {
g.log(fmt.Sprintf("Warning: failed to enable dms service: %v", err)) g.log(fmt.Sprintf("Warning: failed to enable dms service: %v", err))
} }
-232
View File
@@ -74,10 +74,6 @@ func (m *ManualPackageInstaller) InstallManualPackages(ctx context.Context, pack
if err := m.installHyprland(ctx, sudoPassword, progressChan); err != nil { if err := m.installHyprland(ctx, sudoPassword, progressChan); err != nil {
return fmt.Errorf("failed to install hyprland: %w", err) return fmt.Errorf("failed to install hyprland: %w", err)
} }
case "hyprpicker":
if err := m.installHyprpicker(ctx, sudoPassword, progressChan); err != nil {
return fmt.Errorf("failed to install hyprpicker: %w", err)
}
case "ghostty": case "ghostty":
if err := m.installGhostty(ctx, sudoPassword, progressChan); err != nil { if err := m.installGhostty(ctx, sudoPassword, progressChan); err != nil {
return fmt.Errorf("failed to install ghostty: %w", err) return fmt.Errorf("failed to install ghostty: %w", err)
@@ -86,10 +82,6 @@ func (m *ManualPackageInstaller) InstallManualPackages(ctx context.Context, pack
if err := m.installMatugen(ctx, sudoPassword, progressChan); err != nil { if err := m.installMatugen(ctx, sudoPassword, progressChan); err != nil {
return fmt.Errorf("failed to install matugen: %w", err) return fmt.Errorf("failed to install matugen: %w", err)
} }
case "cliphist":
if err := m.installCliphist(ctx, sudoPassword, progressChan); err != nil {
return fmt.Errorf("failed to install cliphist: %w", err)
}
case "xwayland-satellite": case "xwayland-satellite":
if err := m.installXwaylandSatellite(ctx, sudoPassword, progressChan); err != nil { if err := m.installXwaylandSatellite(ctx, sudoPassword, progressChan); err != nil {
return fmt.Errorf("failed to install xwayland-satellite: %w", err) return fmt.Errorf("failed to install xwayland-satellite: %w", err)
@@ -405,184 +397,6 @@ func (m *ManualPackageInstaller) installHyprland(ctx context.Context, sudoPasswo
return nil return nil
} }
func (m *ManualPackageInstaller) installHyprpicker(ctx context.Context, sudoPassword string, progressChan chan<- InstallProgressMsg) error {
m.log("Installing hyprpicker from source...")
homeDir := os.Getenv("HOME")
if homeDir == "" {
return fmt.Errorf("HOME environment variable not set")
}
cacheDir := filepath.Join(homeDir, ".cache", "dankinstall")
if err := os.MkdirAll(cacheDir, 0755); err != nil {
return fmt.Errorf("failed to create cache directory: %w", err)
}
// Install hyprutils first
progressChan <- InstallProgressMsg{
Phase: PhaseSystemPackages,
Progress: 0.05,
Step: "Building hyprutils dependency...",
IsComplete: false,
CommandInfo: "git clone https://github.com/hyprwm/hyprutils.git",
}
hyprutilsDir := filepath.Join(cacheDir, "hyprutils-build")
if err := os.MkdirAll(hyprutilsDir, 0755); err != nil {
return fmt.Errorf("failed to create hyprutils directory: %w", err)
}
defer os.RemoveAll(hyprutilsDir)
cloneUtilsCmd := exec.CommandContext(ctx, "git", "clone", "https://github.com/hyprwm/hyprutils.git", hyprutilsDir)
if err := cloneUtilsCmd.Run(); err != nil {
return fmt.Errorf("failed to clone hyprutils: %w", err)
}
configureUtilsCmd := exec.CommandContext(ctx, "cmake",
"--no-warn-unused-cli",
"-DCMAKE_BUILD_TYPE:STRING=Release",
"-DCMAKE_INSTALL_PREFIX:PATH=/usr",
"-DBUILD_TESTING=off",
"-S", ".",
"-B", "./build")
configureUtilsCmd.Dir = hyprutilsDir
configureUtilsCmd.Env = append(os.Environ(), "TMPDIR="+cacheDir)
if err := m.runWithProgressStep(configureUtilsCmd, progressChan, PhaseSystemPackages, 0.05, 0.1, "Configuring hyprutils..."); err != nil {
return fmt.Errorf("failed to configure hyprutils: %w", err)
}
buildUtilsCmd := exec.CommandContext(ctx, "cmake", "--build", "./build", "--config", "Release", "--target", "all")
buildUtilsCmd.Dir = hyprutilsDir
buildUtilsCmd.Env = append(os.Environ(), "TMPDIR="+cacheDir)
if err := m.runWithProgressStep(buildUtilsCmd, progressChan, PhaseSystemPackages, 0.1, 0.2, "Building hyprutils..."); err != nil {
return fmt.Errorf("failed to build hyprutils: %w", err)
}
installUtilsCmd := ExecSudoCommand(ctx, sudoPassword, "cmake --install ./build")
installUtilsCmd.Dir = hyprutilsDir
if err := installUtilsCmd.Run(); err != nil {
return fmt.Errorf("failed to install hyprutils: %w", err)
}
// Install hyprwayland-scanner
progressChan <- InstallProgressMsg{
Phase: PhaseSystemPackages,
Progress: 0.2,
Step: "Building hyprwayland-scanner dependency...",
IsComplete: false,
CommandInfo: "git clone https://github.com/hyprwm/hyprwayland-scanner.git",
}
scannerDir := filepath.Join(cacheDir, "hyprwayland-scanner-build")
if err := os.MkdirAll(scannerDir, 0755); err != nil {
return fmt.Errorf("failed to create scanner directory: %w", err)
}
defer os.RemoveAll(scannerDir)
cloneScannerCmd := exec.CommandContext(ctx, "git", "clone", "https://github.com/hyprwm/hyprwayland-scanner.git", scannerDir)
if err := cloneScannerCmd.Run(); err != nil {
return fmt.Errorf("failed to clone hyprwayland-scanner: %w", err)
}
configureScannerCmd := exec.CommandContext(ctx, "cmake",
"-DCMAKE_INSTALL_PREFIX=/usr",
"-B", "build")
configureScannerCmd.Dir = scannerDir
configureScannerCmd.Env = append(os.Environ(), "TMPDIR="+cacheDir)
if err := m.runWithProgressStep(configureScannerCmd, progressChan, PhaseSystemPackages, 0.2, 0.25, "Configuring hyprwayland-scanner..."); err != nil {
return fmt.Errorf("failed to configure hyprwayland-scanner: %w", err)
}
buildScannerCmd := exec.CommandContext(ctx, "cmake", "--build", "build", "-j")
buildScannerCmd.Dir = scannerDir
buildScannerCmd.Env = append(os.Environ(), "TMPDIR="+cacheDir)
if err := m.runWithProgressStep(buildScannerCmd, progressChan, PhaseSystemPackages, 0.25, 0.35, "Building hyprwayland-scanner..."); err != nil {
return fmt.Errorf("failed to build hyprwayland-scanner: %w", err)
}
installScannerCmd := ExecSudoCommand(ctx, sudoPassword, "cmake --install build")
installScannerCmd.Dir = scannerDir
if err := installScannerCmd.Run(); err != nil {
return fmt.Errorf("failed to install hyprwayland-scanner: %w", err)
}
// Now build hyprpicker
tmpDir := filepath.Join(cacheDir, "hyprpicker-build")
if err := os.MkdirAll(tmpDir, 0755); err != nil {
return fmt.Errorf("failed to create temp directory: %w", err)
}
defer os.RemoveAll(tmpDir)
progressChan <- InstallProgressMsg{
Phase: PhaseSystemPackages,
Progress: 0.35,
Step: "Cloning hyprpicker repository...",
IsComplete: false,
CommandInfo: "git clone https://github.com/hyprwm/hyprpicker.git",
}
cloneCmd := exec.CommandContext(ctx, "git", "clone", "https://github.com/hyprwm/hyprpicker.git", tmpDir)
if err := cloneCmd.Run(); err != nil {
return fmt.Errorf("failed to clone hyprpicker: %w", err)
}
progressChan <- InstallProgressMsg{
Phase: PhaseSystemPackages,
Progress: 0.45,
Step: "Configuring hyprpicker build...",
IsComplete: false,
CommandInfo: "cmake -B build -S . -DCMAKE_BUILD_TYPE=Release",
}
configureCmd := exec.CommandContext(ctx, "cmake",
"--no-warn-unused-cli",
"-DCMAKE_BUILD_TYPE:STRING=Release",
"-DCMAKE_INSTALL_PREFIX:PATH=/usr",
"-S", ".",
"-B", "./build")
configureCmd.Dir = tmpDir
configureCmd.Env = append(os.Environ(), "TMPDIR="+cacheDir)
output, err := configureCmd.CombinedOutput()
if err != nil {
m.log(fmt.Sprintf("cmake configure failed. Output:\n%s", string(output)))
return fmt.Errorf("failed to configure hyprpicker: %w\nCMake output:\n%s", err, string(output))
}
progressChan <- InstallProgressMsg{
Phase: PhaseSystemPackages,
Progress: 0.55,
Step: "Building hyprpicker...",
IsComplete: false,
CommandInfo: "cmake --build build --target hyprpicker",
}
buildCmd := exec.CommandContext(ctx, "cmake", "--build", "./build", "--config", "Release", "--target", "hyprpicker")
buildCmd.Dir = tmpDir
buildCmd.Env = append(os.Environ(), "TMPDIR="+cacheDir)
if err := m.runWithProgressStep(buildCmd, progressChan, PhaseSystemPackages, 0.55, 0.8, "Building hyprpicker..."); err != nil {
return fmt.Errorf("failed to build hyprpicker: %w", err)
}
progressChan <- InstallProgressMsg{
Phase: PhaseSystemPackages,
Progress: 0.8,
Step: "Installing hyprpicker...",
IsComplete: false,
NeedsSudo: true,
CommandInfo: "sudo cmake --install build",
}
installCmd := ExecSudoCommand(ctx, sudoPassword, "cmake --install ./build")
installCmd.Dir = tmpDir
if err := installCmd.Run(); err != nil {
return fmt.Errorf("failed to install hyprpicker: %w", err)
}
m.log("hyprpicker installed successfully from source")
return nil
}
func (m *ManualPackageInstaller) installGhostty(ctx context.Context, sudoPassword string, progressChan chan<- InstallProgressMsg) error { func (m *ManualPackageInstaller) installGhostty(ctx context.Context, sudoPassword string, progressChan chan<- InstallProgressMsg) error {
m.log("Installing Ghostty from source...") m.log("Installing Ghostty from source...")
@@ -803,52 +617,6 @@ func (m *ManualPackageInstaller) installDankMaterialShell(ctx context.Context, v
return nil return nil
} }
func (m *ManualPackageInstaller) installCliphist(ctx context.Context, sudoPassword string, progressChan chan<- InstallProgressMsg) error {
m.log("Installing cliphist from source...")
progressChan <- InstallProgressMsg{
Phase: PhaseSystemPackages,
Progress: 0.1,
Step: "Installing cliphist via go install...",
IsComplete: false,
CommandInfo: "go install go.senan.xyz/cliphist@latest",
}
installCmd := exec.CommandContext(ctx, "go", "install", "go.senan.xyz/cliphist@latest")
if err := m.runWithProgressStep(installCmd, progressChan, PhaseSystemPackages, 0.1, 0.7, "Building cliphist..."); err != nil {
return fmt.Errorf("failed to install cliphist: %w", err)
}
homeDir := os.Getenv("HOME")
sourcePath := filepath.Join(homeDir, "go", "bin", "cliphist")
targetPath := "/usr/local/bin/cliphist"
progressChan <- InstallProgressMsg{
Phase: PhaseSystemPackages,
Progress: 0.7,
Step: "Installing cliphist binary to system...",
IsComplete: false,
NeedsSudo: true,
CommandInfo: fmt.Sprintf("sudo cp %s %s", sourcePath, targetPath),
}
copyCmd := exec.CommandContext(ctx, "sudo", "-S", "cp", sourcePath, targetPath)
copyCmd.Stdin = strings.NewReader(sudoPassword + "\n")
if err := copyCmd.Run(); err != nil {
return fmt.Errorf("failed to copy cliphist to /usr/local/bin: %w", err)
}
// Make it executable
chmodCmd := exec.CommandContext(ctx, "sudo", "-S", "chmod", "+x", targetPath)
chmodCmd.Stdin = strings.NewReader(sudoPassword + "\n")
if err := chmodCmd.Run(); err != nil {
return fmt.Errorf("failed to make cliphist executable: %w", err)
}
m.log("cliphist installed successfully from source")
return nil
}
func (m *ManualPackageInstaller) installXwaylandSatellite(ctx context.Context, sudoPassword string, progressChan chan<- InstallProgressMsg) error { func (m *ManualPackageInstaller) installXwaylandSatellite(ctx context.Context, sudoPassword string, progressChan chan<- InstallProgressMsg) error {
m.log("Installing xwayland-satellite from source...") m.log("Installing xwayland-satellite from source...")
+1 -5
View File
@@ -78,10 +78,8 @@ func (o *OpenSUSEDistribution) DetectDependenciesWithTerminal(ctx context.Contex
dependencies = append(dependencies, o.detectXwaylandSatellite()) dependencies = append(dependencies, o.detectXwaylandSatellite())
} }
// Base detections (common across distros)
dependencies = append(dependencies, o.detectMatugen()) dependencies = append(dependencies, o.detectMatugen())
dependencies = append(dependencies, o.detectDgop()) dependencies = append(dependencies, o.detectDgop())
dependencies = append(dependencies, o.detectClipboardTools()...)
return dependencies, nil return dependencies, nil
} }
@@ -107,10 +105,8 @@ func (o *OpenSUSEDistribution) GetPackageMappingWithVariants(wm deps.WindowManag
"ghostty": {Name: "ghostty", Repository: RepoTypeSystem}, "ghostty": {Name: "ghostty", Repository: RepoTypeSystem},
"kitty": {Name: "kitty", Repository: RepoTypeSystem}, "kitty": {Name: "kitty", Repository: RepoTypeSystem},
"alacritty": {Name: "alacritty", Repository: RepoTypeSystem}, "alacritty": {Name: "alacritty", Repository: RepoTypeSystem},
"wl-clipboard": {Name: "wl-clipboard", Repository: RepoTypeSystem},
"xdg-desktop-portal-gtk": {Name: "xdg-desktop-portal-gtk", Repository: RepoTypeSystem}, "xdg-desktop-portal-gtk": {Name: "xdg-desktop-portal-gtk", Repository: RepoTypeSystem},
"accountsservice": {Name: "accountsservice", Repository: RepoTypeSystem}, "accountsservice": {Name: "accountsservice", Repository: RepoTypeSystem},
"cliphist": {Name: "cliphist", Repository: RepoTypeSystem},
// DMS packages from OBS // DMS packages from OBS
"dms (DankMaterialShell)": o.getDmsMapping(variants["dms (DankMaterialShell)"]), "dms (DankMaterialShell)": o.getDmsMapping(variants["dms (DankMaterialShell)"]),
@@ -371,7 +367,7 @@ func (o *OpenSUSEDistribution) InstallPackages(ctx context.Context, dependencies
o.log(fmt.Sprintf("Warning: failed to write window manager config: %v", err)) o.log(fmt.Sprintf("Warning: failed to write window manager config: %v", err))
} }
if err := o.EnableDMSService(ctx); err != nil { if err := o.EnableDMSService(ctx, wm); err != nil {
o.log(fmt.Sprintf("Warning: failed to enable dms service: %v", err)) o.log(fmt.Sprintf("Warning: failed to enable dms service: %v", err))
} }
+2 -8
View File
@@ -76,10 +76,8 @@ func (u *UbuntuDistribution) DetectDependenciesWithTerminal(ctx context.Context,
dependencies = append(dependencies, u.detectXwaylandSatellite()) dependencies = append(dependencies, u.detectXwaylandSatellite())
} }
// Base detections (common across distros)
dependencies = append(dependencies, u.detectMatugen()) dependencies = append(dependencies, u.detectMatugen())
dependencies = append(dependencies, u.detectDgop()) dependencies = append(dependencies, u.detectDgop())
dependencies = append(dependencies, u.detectClipboardTools()...)
return dependencies, nil return dependencies, nil
} }
@@ -112,7 +110,6 @@ func (u *UbuntuDistribution) GetPackageMappingWithVariants(wm deps.WindowManager
"git": {Name: "git", Repository: RepoTypeSystem}, "git": {Name: "git", Repository: RepoTypeSystem},
"kitty": {Name: "kitty", Repository: RepoTypeSystem}, "kitty": {Name: "kitty", Repository: RepoTypeSystem},
"alacritty": {Name: "alacritty", Repository: RepoTypeSystem}, "alacritty": {Name: "alacritty", Repository: RepoTypeSystem},
"wl-clipboard": {Name: "wl-clipboard", Repository: RepoTypeSystem},
"xdg-desktop-portal-gtk": {Name: "xdg-desktop-portal-gtk", Repository: RepoTypeSystem}, "xdg-desktop-portal-gtk": {Name: "xdg-desktop-portal-gtk", Repository: RepoTypeSystem},
"accountsservice": {Name: "accountsservice", Repository: RepoTypeSystem}, "accountsservice": {Name: "accountsservice", Repository: RepoTypeSystem},
@@ -121,7 +118,6 @@ func (u *UbuntuDistribution) GetPackageMappingWithVariants(wm deps.WindowManager
"quickshell": u.getQuickshellMapping(variants["quickshell"]), "quickshell": u.getQuickshellMapping(variants["quickshell"]),
"matugen": {Name: "matugen", Repository: RepoTypePPA, RepoURL: "ppa:avengemedia/danklinux"}, "matugen": {Name: "matugen", Repository: RepoTypePPA, RepoURL: "ppa:avengemedia/danklinux"},
"dgop": {Name: "dgop", Repository: RepoTypePPA, RepoURL: "ppa:avengemedia/danklinux"}, "dgop": {Name: "dgop", Repository: RepoTypePPA, RepoURL: "ppa:avengemedia/danklinux"},
"cliphist": {Name: "cliphist", Repository: RepoTypePPA, RepoURL: "ppa:avengemedia/danklinux"},
"ghostty": {Name: "ghostty", Repository: RepoTypePPA, RepoURL: "ppa:avengemedia/danklinux"}, "ghostty": {Name: "ghostty", Repository: RepoTypePPA, RepoURL: "ppa:avengemedia/danklinux"},
} }
@@ -331,7 +327,7 @@ func (u *UbuntuDistribution) InstallPackages(ctx context.Context, dependencies [
u.log(fmt.Sprintf("Warning: failed to write window manager config: %v", err)) u.log(fmt.Sprintf("Warning: failed to write window manager config: %v", err))
} }
if err := u.EnableDMSService(ctx); err != nil { if err := u.EnableDMSService(ctx, wm); err != nil {
u.log(fmt.Sprintf("Warning: failed to enable dms service: %v", err)) u.log(fmt.Sprintf("Warning: failed to enable dms service: %v", err))
} }
@@ -539,8 +535,6 @@ func (u *UbuntuDistribution) installBuildDependencies(ctx context.Context, manua
buildDeps["libpam0g-dev"] = true buildDeps["libpam0g-dev"] = true
case "matugen": case "matugen":
buildDeps["curl"] = true buildDeps["curl"] = true
case "cliphist":
// Go will be installed separately with PPA
} }
} }
@@ -550,7 +544,7 @@ func (u *UbuntuDistribution) installBuildDependencies(ctx context.Context, manua
if err := u.installRust(ctx, sudoPassword, progressChan); err != nil { if err := u.installRust(ctx, sudoPassword, progressChan); err != nil {
return fmt.Errorf("failed to install Rust: %w", err) return fmt.Errorf("failed to install Rust: %w", err)
} }
case "cliphist", "dgop": case "dgop":
if err := u.installGo(ctx, sudoPassword, progressChan); err != nil { if err := u.installGo(ctx, sudoPassword, progressChan); err != nil {
return fmt.Errorf("failed to install Go: %w", err) return fmt.Errorf("failed to install Go: %w", err)
} }
+1 -1
View File
@@ -518,7 +518,7 @@ func (m Model) categorizeDependencies() map[string][]DependencyInfo {
categories["Hyprland Components"] = append(categories["Hyprland Components"], dep) categories["Hyprland Components"] = append(categories["Hyprland Components"], dep)
case "niri": case "niri":
categories["Niri Components"] = append(categories["Niri Components"], dep) categories["Niri Components"] = append(categories["Niri Components"], dep)
case "kitty", "alacritty", "ghostty", "hyprpicker": case "kitty", "alacritty", "ghostty":
categories["Shared Components"] = append(categories["Shared Components"], dep) categories["Shared Components"] = append(categories["Shared Components"], dep)
default: default:
categories["Shared Components"] = append(categories["Shared Components"], dep) categories["Shared Components"] = append(categories["Shared Components"], dep)
+3 -3
View File
@@ -16,9 +16,9 @@ type DiscoveryConfig struct {
func DefaultDiscoveryConfig() *DiscoveryConfig { func DefaultDiscoveryConfig() *DiscoveryConfig {
var searchPaths []string var searchPaths []string
configHome := utils.XDGConfigHome() configDir, err := os.UserConfigDir()
if configHome != "" { if err == nil && configDir != "" {
searchPaths = append(searchPaths, filepath.Join(configHome, "DankMaterialShell", "cheatsheets")) searchPaths = append(searchPaths, filepath.Join(configDir, "DankMaterialShell", "cheatsheets"))
} }
configDirs := os.Getenv("XDG_CONFIG_DIRS") configDirs := os.Getenv("XDG_CONFIG_DIRS")
+18 -4
View File
@@ -6,10 +6,10 @@ import (
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"sort" "sort"
"strconv"
"strings" "strings"
"github.com/AvengeMedia/DankMaterialShell/core/internal/keybinds" "github.com/AvengeMedia/DankMaterialShell/core/internal/keybinds"
"github.com/AvengeMedia/DankMaterialShell/core/internal/utils"
"github.com/sblinch/kdl-go" "github.com/sblinch/kdl-go"
"github.com/sblinch/kdl-go/document" "github.com/sblinch/kdl-go/document"
) )
@@ -30,7 +30,11 @@ func NewNiriProvider(configDir string) *NiriProvider {
} }
func defaultNiriConfigDir() string { func defaultNiriConfigDir() string {
return filepath.Join(utils.XDGConfigHome(), "niri") configDir, err := os.UserConfigDir()
if err != nil {
return ""
}
return filepath.Join(configDir, "niri")
} }
func (n *NiriProvider) Name() string { func (n *NiriProvider) Name() string {
@@ -153,6 +157,7 @@ func (n *NiriProvider) convertKeybind(kb *NiriKeyBinding, subcategory string, co
Subcategory: subcategory, Subcategory: subcategory,
Source: source, Source: source,
HideOnOverlay: kb.HideOnOverlay, HideOnOverlay: kb.HideOnOverlay,
CooldownMs: kb.CooldownMs,
} }
if source == "dms" && conflicts != nil { if source == "dms" && conflicts != nil {
@@ -310,7 +315,9 @@ func (n *NiriProvider) extractOptions(node *document.Node) map[string]any {
opts["repeat"] = val.String() == "true" opts["repeat"] = val.String() == "true"
} }
if val, ok := node.Properties.Get("cooldown-ms"); ok { if val, ok := node.Properties.Get("cooldown-ms"); ok {
opts["cooldown-ms"] = val.String() if ms, err := strconv.Atoi(val.String()); err == nil {
opts["cooldown-ms"] = ms
}
} }
if val, ok := node.Properties.Get("allow-when-locked"); ok { if val, ok := node.Properties.Get("allow-when-locked"); ok {
opts["allow-when-locked"] = val.String() == "true" opts["allow-when-locked"] = val.String() == "true"
@@ -336,7 +343,14 @@ func (n *NiriProvider) buildBindNode(bind *overrideBind) *document.Node {
node.AddProperty("repeat", false, "") node.AddProperty("repeat", false, "")
} }
if v, ok := bind.Options["cooldown-ms"]; ok { if v, ok := bind.Options["cooldown-ms"]; ok {
node.AddProperty("cooldown-ms", v, "") switch val := v.(type) {
case int:
node.AddProperty("cooldown-ms", val, "")
case string:
if ms, err := strconv.Atoi(val); err == nil {
node.AddProperty("cooldown-ms", ms, "")
}
}
} }
if v, ok := bind.Options["allow-when-locked"]; ok && v == true { if v, ok := bind.Options["allow-when-locked"]; ok && v == true {
node.AddProperty("allow-when-locked", true, "") node.AddProperty("allow-when-locked", true, "")
@@ -4,6 +4,7 @@ import (
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
"strconv"
"strings" "strings"
"github.com/sblinch/kdl-go" "github.com/sblinch/kdl-go"
@@ -17,6 +18,7 @@ type NiriKeyBinding struct {
Args []string Args []string
Description string Description string
HideOnOverlay bool HideOnOverlay bool
CooldownMs int
Source string Source string
} }
@@ -275,6 +277,7 @@ func (p *NiriParser) parseKeybindNode(node *document.Node, _ string) *NiriKeyBin
var description string var description string
var hideOnOverlay bool var hideOnOverlay bool
var cooldownMs int
if node.Properties != nil { if node.Properties != nil {
if val, ok := node.Properties.Get("hotkey-overlay-title"); ok { if val, ok := node.Properties.Get("hotkey-overlay-title"); ok {
switch val.ValueString() { switch val.ValueString() {
@@ -284,6 +287,9 @@ func (p *NiriParser) parseKeybindNode(node *document.Node, _ string) *NiriKeyBin
description = val.ValueString() description = val.ValueString()
} }
} }
if val, ok := node.Properties.Get("cooldown-ms"); ok {
cooldownMs, _ = strconv.Atoi(val.String())
}
} }
return &NiriKeyBinding{ return &NiriKeyBinding{
@@ -293,6 +299,7 @@ func (p *NiriParser) parseKeybindNode(node *document.Node, _ string) *NiriKeyBin
Args: args, Args: args,
Description: description, Description: description,
HideOnOverlay: hideOnOverlay, HideOnOverlay: hideOnOverlay,
CooldownMs: cooldownMs,
Source: p.currentSource, Source: p.currentSource,
} }
} }
+1
View File
@@ -7,6 +7,7 @@ type Keybind struct {
Subcategory string `json:"subcat,omitempty"` Subcategory string `json:"subcat,omitempty"`
Source string `json:"source,omitempty"` Source string `json:"source,omitempty"`
HideOnOverlay bool `json:"hideOnOverlay,omitempty"` HideOnOverlay bool `json:"hideOnOverlay,omitempty"`
CooldownMs int `json:"cooldownMs,omitempty"`
Conflict *Keybind `json:"conflict,omitempty"` Conflict *Keybind `json:"conflict,omitempty"`
} }
+68 -23
View File
@@ -34,6 +34,7 @@ type Options struct {
StockColors string StockColors string
SyncModeWithPortal bool SyncModeWithPortal bool
TerminalsAlwaysDark bool TerminalsAlwaysDark bool
SkipTemplates string
} }
type ColorsOutput struct { type ColorsOutput struct {
@@ -47,6 +48,18 @@ func (o *Options) ColorsOutput() string {
return filepath.Join(o.StateDir, "dms-colors.json") return filepath.Join(o.StateDir, "dms-colors.json")
} }
func (o *Options) ShouldSkipTemplate(name string) bool {
if o.SkipTemplates == "" {
return false
}
for _, skip := range strings.Split(o.SkipTemplates, ",") {
if strings.TrimSpace(skip) == name {
return true
}
}
return false
}
func Run(opts Options) error { func Run(opts Options) error {
if opts.StateDir == "" { if opts.StateDir == "" {
return fmt.Errorf("state-dir is required") return fmt.Errorf("state-dir is required")
@@ -218,34 +231,66 @@ output_path = '%s'
`, opts.ShellDir, opts.ColorsOutput()) `, opts.ShellDir, opts.ColorsOutput())
switch opts.Mode { if !opts.ShouldSkipTemplate("gtk") {
case "light": switch opts.Mode {
appendConfig(opts, cfgFile, "skip", "gtk3-light.toml") case "light":
default: appendConfig(opts, cfgFile, "skip", "gtk3-light.toml")
appendConfig(opts, cfgFile, "skip", "gtk3-dark.toml") default:
appendConfig(opts, cfgFile, "skip", "gtk3-dark.toml")
}
} }
appendConfig(opts, cfgFile, "niri", "niri.toml") if !opts.ShouldSkipTemplate("niri") {
appendConfig(opts, cfgFile, "qt5ct", "qt5ct.toml") appendConfig(opts, cfgFile, "niri", "niri.toml")
appendConfig(opts, cfgFile, "qt6ct", "qt6ct.toml") }
appendConfig(opts, cfgFile, "firefox", "firefox.toml") if !opts.ShouldSkipTemplate("qt5ct") {
appendConfig(opts, cfgFile, "pywalfox", "pywalfox.toml") appendConfig(opts, cfgFile, "qt5ct", "qt5ct.toml")
appendConfig(opts, cfgFile, "vesktop", "vesktop.toml") }
if !opts.ShouldSkipTemplate("qt6ct") {
appendConfig(opts, cfgFile, "qt6ct", "qt6ct.toml")
}
if !opts.ShouldSkipTemplate("firefox") {
appendConfig(opts, cfgFile, "firefox", "firefox.toml")
}
if !opts.ShouldSkipTemplate("pywalfox") {
appendConfig(opts, cfgFile, "pywalfox", "pywalfox.toml")
}
if !opts.ShouldSkipTemplate("vesktop") {
appendConfig(opts, cfgFile, "vesktop", "vesktop.toml")
}
appendTerminalConfig(opts, cfgFile, tmpDir, "ghostty", "ghostty.toml") if !opts.ShouldSkipTemplate("ghostty") {
appendTerminalConfig(opts, cfgFile, tmpDir, "kitty", "kitty.toml") appendTerminalConfig(opts, cfgFile, tmpDir, "ghostty", "ghostty.toml")
appendTerminalConfig(opts, cfgFile, tmpDir, "foot", "foot.toml") }
appendTerminalConfig(opts, cfgFile, tmpDir, "alacritty", "alacritty.toml") if !opts.ShouldSkipTemplate("kitty") {
appendTerminalConfig(opts, cfgFile, tmpDir, "wezterm", "wezterm.toml") appendTerminalConfig(opts, cfgFile, tmpDir, "kitty", "kitty.toml")
}
if !opts.ShouldSkipTemplate("foot") {
appendTerminalConfig(opts, cfgFile, tmpDir, "foot", "foot.toml")
}
if !opts.ShouldSkipTemplate("alacritty") {
appendTerminalConfig(opts, cfgFile, tmpDir, "alacritty", "alacritty.toml")
}
if !opts.ShouldSkipTemplate("wezterm") {
appendTerminalConfig(opts, cfgFile, tmpDir, "wezterm", "wezterm.toml")
}
appendConfig(opts, cfgFile, "dgop", "dgop.toml") if !opts.ShouldSkipTemplate("dgop") {
appendConfig(opts, cfgFile, "dgop", "dgop.toml")
}
homeDir, _ := os.UserHomeDir() if !opts.ShouldSkipTemplate("kcolorscheme") {
appendVSCodeConfig(cfgFile, "vscode", filepath.Join(homeDir, ".vscode/extensions/local.dynamic-base16-dankshell-0.0.1"), opts.ShellDir) appendConfig(opts, cfgFile, "skip", "kcolorscheme.toml")
appendVSCodeConfig(cfgFile, "codium", filepath.Join(homeDir, ".vscode-oss/extensions/local.dynamic-base16-dankshell-0.0.1"), opts.ShellDir) }
appendVSCodeConfig(cfgFile, "codeoss", filepath.Join(homeDir, ".config/Code - OSS/extensions/local.dynamic-base16-dankshell-0.0.1"), opts.ShellDir)
appendVSCodeConfig(cfgFile, "cursor", filepath.Join(homeDir, ".cursor/extensions/local.dynamic-base16-dankshell-0.0.1"), opts.ShellDir) if !opts.ShouldSkipTemplate("vscode") {
appendVSCodeConfig(cfgFile, "windsurf", filepath.Join(homeDir, ".windsurf/extensions/local.dynamic-base16-dankshell-0.0.1"), opts.ShellDir) homeDir, _ := os.UserHomeDir()
appendVSCodeConfig(cfgFile, "vscode", filepath.Join(homeDir, ".vscode/extensions/local.dynamic-base16-dankshell-0.0.1"), opts.ShellDir)
appendVSCodeConfig(cfgFile, "codium", filepath.Join(homeDir, ".vscode-oss/extensions/local.dynamic-base16-dankshell-0.0.1"), opts.ShellDir)
appendVSCodeConfig(cfgFile, "codeoss", filepath.Join(homeDir, ".config/Code - OSS/extensions/local.dynamic-base16-dankshell-0.0.1"), opts.ShellDir)
appendVSCodeConfig(cfgFile, "cursor", filepath.Join(homeDir, ".cursor/extensions/local.dynamic-base16-dankshell-0.0.1"), opts.ShellDir)
appendVSCodeConfig(cfgFile, "windsurf", filepath.Join(homeDir, ".windsurf/extensions/local.dynamic-base16-dankshell-0.0.1"), opts.ShellDir)
}
if opts.RunUserTemplates { if opts.RunUserTemplates {
if data, err := os.ReadFile(userConfigPath); err == nil { if data, err := os.ReadFile(userConfigPath); err == nil {
@@ -0,0 +1,229 @@
// Code generated by mockery v2.53.5. DO NOT EDIT.
package mocks_wlclient
import (
client "github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/wayland/client"
mock "github.com/stretchr/testify/mock"
)
// MockWaylandDisplay is an autogenerated mock type for the WaylandDisplay type
type MockWaylandDisplay struct {
mock.Mock
}
type MockWaylandDisplay_Expecter struct {
mock *mock.Mock
}
func (_m *MockWaylandDisplay) EXPECT() *MockWaylandDisplay_Expecter {
return &MockWaylandDisplay_Expecter{mock: &_m.Mock}
}
// Context provides a mock function with no fields
func (_m *MockWaylandDisplay) Context() *client.Context {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for Context")
}
var r0 *client.Context
if rf, ok := ret.Get(0).(func() *client.Context); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*client.Context)
}
}
return r0
}
// MockWaylandDisplay_Context_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Context'
type MockWaylandDisplay_Context_Call struct {
*mock.Call
}
// Context is a helper method to define mock.On call
func (_e *MockWaylandDisplay_Expecter) Context() *MockWaylandDisplay_Context_Call {
return &MockWaylandDisplay_Context_Call{Call: _e.mock.On("Context")}
}
func (_c *MockWaylandDisplay_Context_Call) Run(run func()) *MockWaylandDisplay_Context_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockWaylandDisplay_Context_Call) Return(_a0 *client.Context) *MockWaylandDisplay_Context_Call {
_c.Call.Return(_a0)
return _c
}
func (_c *MockWaylandDisplay_Context_Call) RunAndReturn(run func() *client.Context) *MockWaylandDisplay_Context_Call {
_c.Call.Return(run)
return _c
}
// Destroy provides a mock function with no fields
func (_m *MockWaylandDisplay) Destroy() error {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for Destroy")
}
var r0 error
if rf, ok := ret.Get(0).(func() error); ok {
r0 = rf()
} else {
r0 = ret.Error(0)
}
return r0
}
// MockWaylandDisplay_Destroy_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Destroy'
type MockWaylandDisplay_Destroy_Call struct {
*mock.Call
}
// Destroy is a helper method to define mock.On call
func (_e *MockWaylandDisplay_Expecter) Destroy() *MockWaylandDisplay_Destroy_Call {
return &MockWaylandDisplay_Destroy_Call{Call: _e.mock.On("Destroy")}
}
func (_c *MockWaylandDisplay_Destroy_Call) Run(run func()) *MockWaylandDisplay_Destroy_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockWaylandDisplay_Destroy_Call) Return(_a0 error) *MockWaylandDisplay_Destroy_Call {
_c.Call.Return(_a0)
return _c
}
func (_c *MockWaylandDisplay_Destroy_Call) RunAndReturn(run func() error) *MockWaylandDisplay_Destroy_Call {
_c.Call.Return(run)
return _c
}
// GetRegistry provides a mock function with no fields
func (_m *MockWaylandDisplay) GetRegistry() (*client.Registry, error) {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for GetRegistry")
}
var r0 *client.Registry
var r1 error
if rf, ok := ret.Get(0).(func() (*client.Registry, error)); ok {
return rf()
}
if rf, ok := ret.Get(0).(func() *client.Registry); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*client.Registry)
}
}
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockWaylandDisplay_GetRegistry_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRegistry'
type MockWaylandDisplay_GetRegistry_Call struct {
*mock.Call
}
// GetRegistry is a helper method to define mock.On call
func (_e *MockWaylandDisplay_Expecter) GetRegistry() *MockWaylandDisplay_GetRegistry_Call {
return &MockWaylandDisplay_GetRegistry_Call{Call: _e.mock.On("GetRegistry")}
}
func (_c *MockWaylandDisplay_GetRegistry_Call) Run(run func()) *MockWaylandDisplay_GetRegistry_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockWaylandDisplay_GetRegistry_Call) Return(_a0 *client.Registry, _a1 error) *MockWaylandDisplay_GetRegistry_Call {
_c.Call.Return(_a0, _a1)
return _c
}
func (_c *MockWaylandDisplay_GetRegistry_Call) RunAndReturn(run func() (*client.Registry, error)) *MockWaylandDisplay_GetRegistry_Call {
_c.Call.Return(run)
return _c
}
// Roundtrip provides a mock function with no fields
func (_m *MockWaylandDisplay) Roundtrip() error {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for Roundtrip")
}
var r0 error
if rf, ok := ret.Get(0).(func() error); ok {
r0 = rf()
} else {
r0 = ret.Error(0)
}
return r0
}
// MockWaylandDisplay_Roundtrip_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Roundtrip'
type MockWaylandDisplay_Roundtrip_Call struct {
*mock.Call
}
// Roundtrip is a helper method to define mock.On call
func (_e *MockWaylandDisplay_Expecter) Roundtrip() *MockWaylandDisplay_Roundtrip_Call {
return &MockWaylandDisplay_Roundtrip_Call{Call: _e.mock.On("Roundtrip")}
}
func (_c *MockWaylandDisplay_Roundtrip_Call) Run(run func()) *MockWaylandDisplay_Roundtrip_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockWaylandDisplay_Roundtrip_Call) Return(_a0 error) *MockWaylandDisplay_Roundtrip_Call {
_c.Call.Return(_a0)
return _c
}
func (_c *MockWaylandDisplay_Roundtrip_Call) RunAndReturn(run func() error) *MockWaylandDisplay_Roundtrip_Call {
_c.Call.Return(run)
return _c
}
// NewMockWaylandDisplay creates a new instance of MockWaylandDisplay. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewMockWaylandDisplay(t interface {
mock.TestingT
Cleanup(func())
}) *MockWaylandDisplay {
mock := &MockWaylandDisplay{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
@@ -0,0 +1,226 @@
// Code generated by mockery v2.53.5. DO NOT EDIT.
package mocks_wlcontext
import (
client "github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/wayland/client"
mock "github.com/stretchr/testify/mock"
)
// MockWaylandContext is an autogenerated mock type for the WaylandContext type
type MockWaylandContext struct {
mock.Mock
}
type MockWaylandContext_Expecter struct {
mock *mock.Mock
}
func (_m *MockWaylandContext) EXPECT() *MockWaylandContext_Expecter {
return &MockWaylandContext_Expecter{mock: &_m.Mock}
}
// Close provides a mock function with no fields
func (_m *MockWaylandContext) Close() {
_m.Called()
}
// MockWaylandContext_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close'
type MockWaylandContext_Close_Call struct {
*mock.Call
}
// Close is a helper method to define mock.On call
func (_e *MockWaylandContext_Expecter) Close() *MockWaylandContext_Close_Call {
return &MockWaylandContext_Close_Call{Call: _e.mock.On("Close")}
}
func (_c *MockWaylandContext_Close_Call) Run(run func()) *MockWaylandContext_Close_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockWaylandContext_Close_Call) Return() *MockWaylandContext_Close_Call {
_c.Call.Return()
return _c
}
func (_c *MockWaylandContext_Close_Call) RunAndReturn(run func()) *MockWaylandContext_Close_Call {
_c.Run(run)
return _c
}
// Display provides a mock function with no fields
func (_m *MockWaylandContext) Display() *client.Display {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for Display")
}
var r0 *client.Display
if rf, ok := ret.Get(0).(func() *client.Display); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*client.Display)
}
}
return r0
}
// MockWaylandContext_Display_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Display'
type MockWaylandContext_Display_Call struct {
*mock.Call
}
// Display is a helper method to define mock.On call
func (_e *MockWaylandContext_Expecter) Display() *MockWaylandContext_Display_Call {
return &MockWaylandContext_Display_Call{Call: _e.mock.On("Display")}
}
func (_c *MockWaylandContext_Display_Call) Run(run func()) *MockWaylandContext_Display_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockWaylandContext_Display_Call) Return(_a0 *client.Display) *MockWaylandContext_Display_Call {
_c.Call.Return(_a0)
return _c
}
func (_c *MockWaylandContext_Display_Call) RunAndReturn(run func() *client.Display) *MockWaylandContext_Display_Call {
_c.Call.Return(run)
return _c
}
// FatalError provides a mock function with no fields
func (_m *MockWaylandContext) FatalError() <-chan error {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for FatalError")
}
var r0 <-chan error
if rf, ok := ret.Get(0).(func() <-chan error); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(<-chan error)
}
}
return r0
}
// MockWaylandContext_FatalError_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FatalError'
type MockWaylandContext_FatalError_Call struct {
*mock.Call
}
// FatalError is a helper method to define mock.On call
func (_e *MockWaylandContext_Expecter) FatalError() *MockWaylandContext_FatalError_Call {
return &MockWaylandContext_FatalError_Call{Call: _e.mock.On("FatalError")}
}
func (_c *MockWaylandContext_FatalError_Call) Run(run func()) *MockWaylandContext_FatalError_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockWaylandContext_FatalError_Call) Return(_a0 <-chan error) *MockWaylandContext_FatalError_Call {
_c.Call.Return(_a0)
return _c
}
func (_c *MockWaylandContext_FatalError_Call) RunAndReturn(run func() <-chan error) *MockWaylandContext_FatalError_Call {
_c.Call.Return(run)
return _c
}
// Post provides a mock function with given fields: fn
func (_m *MockWaylandContext) Post(fn func()) {
_m.Called(fn)
}
// MockWaylandContext_Post_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Post'
type MockWaylandContext_Post_Call struct {
*mock.Call
}
// Post is a helper method to define mock.On call
// - fn func()
func (_e *MockWaylandContext_Expecter) Post(fn interface{}) *MockWaylandContext_Post_Call {
return &MockWaylandContext_Post_Call{Call: _e.mock.On("Post", fn)}
}
func (_c *MockWaylandContext_Post_Call) Run(run func(fn func())) *MockWaylandContext_Post_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(func()))
})
return _c
}
func (_c *MockWaylandContext_Post_Call) Return() *MockWaylandContext_Post_Call {
_c.Call.Return()
return _c
}
func (_c *MockWaylandContext_Post_Call) RunAndReturn(run func(func())) *MockWaylandContext_Post_Call {
_c.Run(run)
return _c
}
// Start provides a mock function with no fields
func (_m *MockWaylandContext) Start() {
_m.Called()
}
// MockWaylandContext_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start'
type MockWaylandContext_Start_Call struct {
*mock.Call
}
// Start is a helper method to define mock.On call
func (_e *MockWaylandContext_Expecter) Start() *MockWaylandContext_Start_Call {
return &MockWaylandContext_Start_Call{Call: _e.mock.On("Start")}
}
func (_c *MockWaylandContext_Start_Call) Run(run func()) *MockWaylandContext_Start_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockWaylandContext_Start_Call) Return() *MockWaylandContext_Start_Call {
_c.Call.Return()
return _c
}
func (_c *MockWaylandContext_Start_Call) RunAndReturn(run func()) *MockWaylandContext_Start_Call {
_c.Run(run)
return _c
}
// NewMockWaylandContext creates a new instance of MockWaylandContext. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewMockWaylandContext(t interface {
mock.TestingT
Cleanup(func())
}) *MockWaylandContext {
mock := &MockWaylandContext{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
+7 -2
View File
@@ -9,7 +9,7 @@ import (
"path/filepath" "path/filepath"
"strings" "strings"
"github.com/AvengeMedia/DankMaterialShell/core/internal/utils" "github.com/AvengeMedia/DankMaterialShell/core/internal/log"
"github.com/spf13/afero" "github.com/spf13/afero"
) )
@@ -33,7 +33,12 @@ func NewManagerWithFs(fs afero.Fs) (*Manager, error) {
} }
func getPluginsDir() string { func getPluginsDir() string {
return filepath.Join(utils.XDGConfigHome(), "DankMaterialShell", "plugins") configDir, err := os.UserConfigDir()
if err != nil {
log.Error("failed to get user config dir", "err", err)
return ""
}
return filepath.Join(configDir, "DankMaterialShell", "plugins")
} }
func (m *Manager) IsInstalled(plugin Plugin) (bool, error) { func (m *Manager) IsInstalled(plugin Plugin) (bool, error) {
@@ -0,0 +1,695 @@
// Generated by go-wayland-scanner
// https://github.com/yaslama/go-wayland/cmd/go-wayland-scanner
// XML file : internal/proto/xml/ext-data-control-v1.xml
//
// ext_data_control_v1 Protocol Copyright:
//
// Copyright © 2018 Simon Ser
// Copyright © 2019 Ivan Molodetskikh
// Copyright © 2024 Neal Gompa
//
// Permission to use, copy, modify, distribute, and sell this
// software and its documentation for any purpose is hereby granted
// without fee, provided that the above copyright notice appear in
// all copies and that both that copyright notice and this permission
// notice appear in supporting documentation, and that the name of
// the copyright holders not be used in advertising or publicity
// pertaining to distribution of the software without specific,
// written prior permission. The copyright holders make no
// representations about the suitability of this software for any
// purpose. It is provided "as is" without express or implied
// warranty.
//
// THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
// SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
// FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
// SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
// AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
// ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
// THIS SOFTWARE.
package ext_data_control
import (
"github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/wayland/client"
"golang.org/x/sys/unix"
)
// ExtDataControlManagerV1InterfaceName is the name of the interface as it appears in the [client.Registry].
// It can be used to match the [client.RegistryGlobalEvent.Interface] in the
// [Registry.SetGlobalHandler] and can be used in [Registry.Bind] if this applies.
const ExtDataControlManagerV1InterfaceName = "ext_data_control_manager_v1"
// ExtDataControlManagerV1 : manager to control data devices
//
// This interface is a manager that allows creating per-seat data device
// controls.
type ExtDataControlManagerV1 struct {
client.BaseProxy
}
// NewExtDataControlManagerV1 : manager to control data devices
//
// This interface is a manager that allows creating per-seat data device
// controls.
func NewExtDataControlManagerV1(ctx *client.Context) *ExtDataControlManagerV1 {
extDataControlManagerV1 := &ExtDataControlManagerV1{}
ctx.Register(extDataControlManagerV1)
return extDataControlManagerV1
}
// CreateDataSource : create a new data source
//
// Create a new data source.
func (i *ExtDataControlManagerV1) CreateDataSource() (*ExtDataControlSourceV1, error) {
id := NewExtDataControlSourceV1(i.Context())
const opcode = 0
const _reqBufLen = 8 + 4
var _reqBuf [_reqBufLen]byte
l := 0
client.PutUint32(_reqBuf[l:4], i.ID())
l += 4
client.PutUint32(_reqBuf[l:l+4], uint32(_reqBufLen<<16|opcode&0x0000ffff))
l += 4
client.PutUint32(_reqBuf[l:l+4], id.ID())
l += 4
err := i.Context().WriteMsg(_reqBuf[:], nil)
return id, err
}
// GetDataDevice : get a data device for a seat
//
// Create a data device that can be used to manage a seat's selection.
func (i *ExtDataControlManagerV1) GetDataDevice(seat *client.Seat) (*ExtDataControlDeviceV1, error) {
id := NewExtDataControlDeviceV1(i.Context())
const opcode = 1
const _reqBufLen = 8 + 4 + 4
var _reqBuf [_reqBufLen]byte
l := 0
client.PutUint32(_reqBuf[l:4], i.ID())
l += 4
client.PutUint32(_reqBuf[l:l+4], uint32(_reqBufLen<<16|opcode&0x0000ffff))
l += 4
client.PutUint32(_reqBuf[l:l+4], id.ID())
l += 4
client.PutUint32(_reqBuf[l:l+4], seat.ID())
l += 4
err := i.Context().WriteMsg(_reqBuf[:], nil)
return id, err
}
// GetDataDeviceWithProxy : get a data device for a seat using a pre-created proxy
//
// Like GetDataDevice, but uses a pre-created ExtDataControlDeviceV1 proxy.
// This allows setting up event handlers before the request is sent.
func (i *ExtDataControlManagerV1) GetDataDeviceWithProxy(device *ExtDataControlDeviceV1, seat *client.Seat) error {
const opcode = 1
const _reqBufLen = 8 + 4 + 4
var _reqBuf [_reqBufLen]byte
l := 0
client.PutUint32(_reqBuf[l:4], i.ID())
l += 4
client.PutUint32(_reqBuf[l:l+4], uint32(_reqBufLen<<16|opcode&0x0000ffff))
l += 4
client.PutUint32(_reqBuf[l:l+4], device.ID())
l += 4
client.PutUint32(_reqBuf[l:l+4], seat.ID())
l += 4
return i.Context().WriteMsg(_reqBuf[:], nil)
}
// Destroy : destroy the manager
//
// All objects created by the manager will still remain valid, until their
// appropriate destroy request has been called.
func (i *ExtDataControlManagerV1) Destroy() error {
defer i.MarkZombie()
const opcode = 2
const _reqBufLen = 8
var _reqBuf [_reqBufLen]byte
l := 0
client.PutUint32(_reqBuf[l:4], i.ID())
l += 4
client.PutUint32(_reqBuf[l:l+4], uint32(_reqBufLen<<16|opcode&0x0000ffff))
l += 4
err := i.Context().WriteMsg(_reqBuf[:], nil)
return err
}
// ExtDataControlDeviceV1InterfaceName is the name of the interface as it appears in the [client.Registry].
// It can be used to match the [client.RegistryGlobalEvent.Interface] in the
// [Registry.SetGlobalHandler] and can be used in [Registry.Bind] if this applies.
const ExtDataControlDeviceV1InterfaceName = "ext_data_control_device_v1"
// ExtDataControlDeviceV1 : manage a data device for a seat
//
// This interface allows a client to manage a seat's selection.
//
// When the seat is destroyed, this object becomes inert.
type ExtDataControlDeviceV1 struct {
client.BaseProxy
dataOfferHandler ExtDataControlDeviceV1DataOfferHandlerFunc
selectionHandler ExtDataControlDeviceV1SelectionHandlerFunc
finishedHandler ExtDataControlDeviceV1FinishedHandlerFunc
primarySelectionHandler ExtDataControlDeviceV1PrimarySelectionHandlerFunc
}
// NewExtDataControlDeviceV1 : manage a data device for a seat
//
// This interface allows a client to manage a seat's selection.
//
// When the seat is destroyed, this object becomes inert.
func NewExtDataControlDeviceV1(ctx *client.Context) *ExtDataControlDeviceV1 {
extDataControlDeviceV1 := &ExtDataControlDeviceV1{}
ctx.Register(extDataControlDeviceV1)
return extDataControlDeviceV1
}
// SetSelection : copy data to the selection
//
// This request asks the compositor to set the selection to the data from
// the source on behalf of the client.
//
// The given source may not be used in any further set_selection or
// set_primary_selection requests. Attempting to use a previously used
// source triggers the used_source protocol error.
//
// To unset the selection, set the source to NULL.
func (i *ExtDataControlDeviceV1) SetSelection(source *ExtDataControlSourceV1) error {
const opcode = 0
const _reqBufLen = 8 + 4
var _reqBuf [_reqBufLen]byte
l := 0
client.PutUint32(_reqBuf[l:4], i.ID())
l += 4
client.PutUint32(_reqBuf[l:l+4], uint32(_reqBufLen<<16|opcode&0x0000ffff))
l += 4
if source == nil {
client.PutUint32(_reqBuf[l:l+4], 0)
l += 4
} else {
client.PutUint32(_reqBuf[l:l+4], source.ID())
l += 4
}
err := i.Context().WriteMsg(_reqBuf[:], nil)
return err
}
// Destroy : destroy this data device
//
// Destroys the data device object.
func (i *ExtDataControlDeviceV1) Destroy() error {
defer i.MarkZombie()
const opcode = 1
const _reqBufLen = 8
var _reqBuf [_reqBufLen]byte
l := 0
client.PutUint32(_reqBuf[l:4], i.ID())
l += 4
client.PutUint32(_reqBuf[l:l+4], uint32(_reqBufLen<<16|opcode&0x0000ffff))
l += 4
err := i.Context().WriteMsg(_reqBuf[:], nil)
return err
}
// SetPrimarySelection : copy data to the primary selection
//
// This request asks the compositor to set the primary selection to the
// data from the source on behalf of the client.
//
// The given source may not be used in any further set_selection or
// set_primary_selection requests. Attempting to use a previously used
// source triggers the used_source protocol error.
//
// To unset the primary selection, set the source to NULL.
//
// The compositor will ignore this request if it does not support primary
// selection.
func (i *ExtDataControlDeviceV1) SetPrimarySelection(source *ExtDataControlSourceV1) error {
const opcode = 2
const _reqBufLen = 8 + 4
var _reqBuf [_reqBufLen]byte
l := 0
client.PutUint32(_reqBuf[l:4], i.ID())
l += 4
client.PutUint32(_reqBuf[l:l+4], uint32(_reqBufLen<<16|opcode&0x0000ffff))
l += 4
if source == nil {
client.PutUint32(_reqBuf[l:l+4], 0)
l += 4
} else {
client.PutUint32(_reqBuf[l:l+4], source.ID())
l += 4
}
err := i.Context().WriteMsg(_reqBuf[:], nil)
return err
}
type ExtDataControlDeviceV1Error uint32
// ExtDataControlDeviceV1Error :
const (
// ExtDataControlDeviceV1ErrorUsedSource : source given to set_selection or set_primary_selection was already used before
ExtDataControlDeviceV1ErrorUsedSource ExtDataControlDeviceV1Error = 1
)
func (e ExtDataControlDeviceV1Error) Name() string {
switch e {
case ExtDataControlDeviceV1ErrorUsedSource:
return "used_source"
default:
return ""
}
}
func (e ExtDataControlDeviceV1Error) Value() string {
switch e {
case ExtDataControlDeviceV1ErrorUsedSource:
return "1"
default:
return ""
}
}
func (e ExtDataControlDeviceV1Error) String() string {
return e.Name() + "=" + e.Value()
}
// ExtDataControlDeviceV1DataOfferEvent : introduce a new ext_data_control_offer
//
// The data_offer event introduces a new ext_data_control_offer object,
// which will subsequently be used in either the
// ext_data_control_device.selection event (for the regular clipboard
// selections) or the ext_data_control_device.primary_selection event (for
// the primary clipboard selections). Immediately following the
// ext_data_control_device.data_offer event, the new data_offer object
// will send out ext_data_control_offer.offer events to describe the MIME
// types it offers.
type ExtDataControlDeviceV1DataOfferEvent struct {
	// Id is the proxy for the newly introduced ext_data_control_offer object.
	Id *ExtDataControlOfferV1
}
// ExtDataControlDeviceV1DataOfferHandlerFunc is the callback type invoked by
// Dispatch for each data_offer event.
type ExtDataControlDeviceV1DataOfferHandlerFunc func(ExtDataControlDeviceV1DataOfferEvent)
// SetDataOfferHandler : sets handler for ExtDataControlDeviceV1DataOfferEvent
// NOTE(review): this is a plain field write with no locking — presumably
// handlers are installed before the event loop starts dispatching; confirm.
func (i *ExtDataControlDeviceV1) SetDataOfferHandler(f ExtDataControlDeviceV1DataOfferHandlerFunc) {
	i.dataOfferHandler = f
}
// ExtDataControlDeviceV1SelectionEvent : advertise new selection
//
// The selection event is sent out to notify the client of a new
// ext_data_control_offer for the selection for this device. The
// ext_data_control_device.data_offer and the ext_data_control_offer.offer
// events are sent out immediately before this event to introduce the data
// offer object. The selection event is sent to a client when a new
// selection is set. The ext_data_control_offer is valid until a new
// ext_data_control_offer or NULL is received. The client must destroy the
// previous selection ext_data_control_offer, if any, upon receiving this
// event. Regardless, the previous selection will be ignored once a new
// selection ext_data_control_offer is received.
//
// The first selection event is sent upon binding the
// ext_data_control_device object.
type ExtDataControlDeviceV1SelectionEvent struct {
	// Id is the offer proxy for the new selection; nil when the selection is
	// cleared (NULL on the wire) or when the ID is not registered locally.
	Id      *ExtDataControlOfferV1
	OfferId uint32 // Raw object ID for external registry lookups
}
// ExtDataControlDeviceV1SelectionHandlerFunc is the callback type invoked by
// Dispatch for each selection event.
type ExtDataControlDeviceV1SelectionHandlerFunc func(ExtDataControlDeviceV1SelectionEvent)
// SetSelectionHandler : sets handler for ExtDataControlDeviceV1SelectionEvent
func (i *ExtDataControlDeviceV1) SetSelectionHandler(f ExtDataControlDeviceV1SelectionHandlerFunc) {
	i.selectionHandler = f
}
// ExtDataControlDeviceV1FinishedEvent : this data control is no longer valid
//
// This data control object is no longer valid and should be destroyed by
// the client.
type ExtDataControlDeviceV1FinishedEvent struct{}
// ExtDataControlDeviceV1FinishedHandlerFunc is the callback type invoked by
// Dispatch for the finished event.
type ExtDataControlDeviceV1FinishedHandlerFunc func(ExtDataControlDeviceV1FinishedEvent)
// SetFinishedHandler : sets handler for ExtDataControlDeviceV1FinishedEvent
func (i *ExtDataControlDeviceV1) SetFinishedHandler(f ExtDataControlDeviceV1FinishedHandlerFunc) {
	i.finishedHandler = f
}
// ExtDataControlDeviceV1PrimarySelectionEvent : advertise new primary selection
//
// The primary_selection event is sent out to notify the client of a new
// ext_data_control_offer for the primary selection for this device. The
// ext_data_control_device.data_offer and the ext_data_control_offer.offer
// events are sent out immediately before this event to introduce the data
// offer object. The primary_selection event is sent to a client when a
// new primary selection is set. The ext_data_control_offer is valid until
// a new ext_data_control_offer or NULL is received. The client must
// destroy the previous primary selection ext_data_control_offer, if any,
// upon receiving this event. Regardless, the previous primary selection
// will be ignored once a new primary selection ext_data_control_offer is
// received.
//
// If the compositor supports primary selection, the first
// primary_selection event is sent upon binding the
// ext_data_control_device object.
type ExtDataControlDeviceV1PrimarySelectionEvent struct {
	// Id is the offer proxy for the new primary selection; nil when cleared
	// (NULL on the wire) or when the ID is not registered locally.
	Id      *ExtDataControlOfferV1
	OfferId uint32 // Raw object ID for external registry lookups
}
// ExtDataControlDeviceV1PrimarySelectionHandlerFunc is the callback type
// invoked by Dispatch for each primary_selection event.
type ExtDataControlDeviceV1PrimarySelectionHandlerFunc func(ExtDataControlDeviceV1PrimarySelectionEvent)
// SetPrimarySelectionHandler : sets handler for ExtDataControlDeviceV1PrimarySelectionEvent
func (i *ExtDataControlDeviceV1) SetPrimarySelectionHandler(f ExtDataControlDeviceV1PrimarySelectionHandlerFunc) {
	i.primarySelectionHandler = f
}
// Dispatch decodes one ext_data_control_device_v1 event and invokes the
// matching registered handler.
//
// opcode selects the event (0=data_offer, 1=selection, 2=finished,
// 3=primary_selection); fd is unused by this interface; data holds the
// wire-format arguments.
func (i *ExtDataControlDeviceV1) Dispatch(opcode uint32, fd int, data []byte) {
	switch opcode {
	case 0:
		// data_offer event: the server allocated a new object ID (new_id).
		// The proxy must be created and registered even when no handler is
		// installed — otherwise later selection/primary_selection events
		// cannot resolve this ID via GetProxy and the object can never be
		// destroyed.
		l := 0
		newID := client.Uint32(data[l : l+4])
		l += 4
		ctx := i.Context()
		offer := &ExtDataControlOfferV1{}
		offer.SetContext(ctx)
		offer.SetID(newID)
		ctx.RegisterWithID(offer, newID)
		if i.dataOfferHandler == nil {
			return
		}
		var e ExtDataControlDeviceV1DataOfferEvent
		e.Id = offer
		i.dataOfferHandler(e)
	case 1:
		// selection event: nullable object reference (0 means NULL, i.e.
		// the selection was cleared).
		if i.selectionHandler == nil {
			return
		}
		var e ExtDataControlDeviceV1SelectionEvent
		l := 0
		objID := client.Uint32(data[l : l+4])
		l += 4
		e.OfferId = objID
		if objID != 0 {
			if p := i.Context().GetProxy(objID); p != nil {
				e.Id = p.(*ExtDataControlOfferV1)
			}
		}
		i.selectionHandler(e)
	case 2:
		// finished event: no arguments.
		if i.finishedHandler == nil {
			return
		}
		var e ExtDataControlDeviceV1FinishedEvent
		i.finishedHandler(e)
	case 3:
		// primary_selection event: nullable object reference (0 means NULL).
		if i.primarySelectionHandler == nil {
			return
		}
		var e ExtDataControlDeviceV1PrimarySelectionEvent
		l := 0
		objID := client.Uint32(data[l : l+4])
		l += 4
		e.OfferId = objID
		if objID != 0 {
			if p := i.Context().GetProxy(objID); p != nil {
				e.Id = p.(*ExtDataControlOfferV1)
			}
		}
		i.primarySelectionHandler(e)
	}
}
// ExtDataControlSourceV1InterfaceName is the name of the interface as it appears in the [client.Registry].
// It can be used to match the [client.RegistryGlobalEvent.Interface] in the
// [Registry.SetGlobalHandler] and can be used in [Registry.Bind] if this applies.
const ExtDataControlSourceV1InterfaceName = "ext_data_control_source_v1"
// ExtDataControlSourceV1 : offer to transfer data
//
// The ext_data_control_source object is the source side of a
// ext_data_control_offer. It is created by the source client in a data
// transfer and provides a way to describe the offered data and a way to
// respond to requests to transfer the data.
type ExtDataControlSourceV1 struct {
	client.BaseProxy
	// sendHandler is invoked when the compositor asks this source to write
	// its data to a file descriptor (the "send" event).
	sendHandler ExtDataControlSourceV1SendHandlerFunc
	// cancelledHandler is invoked when this source has been replaced and
	// should be destroyed (the "cancelled" event).
	cancelledHandler ExtDataControlSourceV1CancelledHandlerFunc
}
// NewExtDataControlSourceV1 : offer to transfer data
//
// Allocates a fresh ext_data_control_source_v1 proxy and registers it with
// the given connection context so the compositor can address it by object ID.
// The source side of a ext_data_control_offer: it describes the offered data
// and responds to requests to transfer it.
func NewExtDataControlSourceV1(ctx *client.Context) *ExtDataControlSourceV1 {
	src := &ExtDataControlSourceV1{}
	ctx.Register(src)
	return src
}
// Offer : add an offered MIME type
//
// Advertises one MIME type for this source; call repeatedly to offer
// multiple types. Calling this after ext_data_control_device.set_selection
// is a protocol error.
//
// mimeType: MIME type offered by the data source
func (i *ExtDataControlSourceV1) Offer(mimeType string) error {
	const opcode = 0
	// Wire string: 4-byte length prefix plus NUL-terminated, padded contents.
	strLen := client.PaddedLen(len(mimeType) + 1)
	msgLen := 8 + (4 + strLen)
	buf := make([]byte, msgLen)
	client.PutUint32(buf[0:4], i.ID())
	// Header word: message length in the high 16 bits, opcode in the low 16.
	client.PutUint32(buf[4:8], uint32(msgLen<<16|opcode&0x0000ffff))
	client.PutString(buf[8:8+(4+strLen)], mimeType)
	return i.Context().WriteMsg(buf, nil)
}
// Destroy : destroy this source
//
// Destroys the data source object.
func (i *ExtDataControlSourceV1) Destroy() error {
	// Mark the proxy dead locally before the request is written — presumably
	// so the dispatcher ignores late events for this ID; confirm against
	// client.BaseProxy.MarkZombie.
	defer i.MarkZombie()
	const opcode = 1
	// Fixed-size message: 4-byte object ID + 4-byte header word.
	const _reqBufLen = 8
	var _reqBuf [_reqBufLen]byte
	l := 0
	client.PutUint32(_reqBuf[l:4], i.ID())
	l += 4
	// Header word: message length in the high 16 bits, opcode in the low 16.
	client.PutUint32(_reqBuf[l:l+4], uint32(_reqBufLen<<16|opcode&0x0000ffff))
	l += 4
	err := i.Context().WriteMsg(_reqBuf[:], nil)
	return err
}
// ExtDataControlSourceV1Error enumerates the protocol errors that the
// compositor may raise for ext_data_control_source_v1.
type ExtDataControlSourceV1Error uint32
// ExtDataControlSourceV1Error :
const (
	// ExtDataControlSourceV1ErrorInvalidOffer : offer sent after ext_data_control_device.set_selection
	ExtDataControlSourceV1ErrorInvalidOffer ExtDataControlSourceV1Error = 1
)
// Name returns the protocol name of the error, or "" for unknown values.
func (e ExtDataControlSourceV1Error) Name() string {
	if e == ExtDataControlSourceV1ErrorInvalidOffer {
		return "invalid_offer"
	}
	return ""
}
// Value returns the numeric value of the error as a decimal string, or ""
// for unknown values.
func (e ExtDataControlSourceV1Error) Value() string {
	if e == ExtDataControlSourceV1ErrorInvalidOffer {
		return "1"
	}
	return ""
}
// String renders the error as "name=value".
func (e ExtDataControlSourceV1Error) String() string {
	name := e.Name()
	value := e.Value()
	return name + "=" + value
}
// ExtDataControlSourceV1SendEvent : send the data
//
// Request for data from the client. Send the data as the specified MIME
// type over the passed file descriptor, then close it.
type ExtDataControlSourceV1SendEvent struct {
	// MimeType is the representation the receiver asked for.
	MimeType string
	// Fd is the write end of the transfer pipe; per the protocol text above,
	// the handler writes the data and then closes it.
	Fd int
}
// ExtDataControlSourceV1SendHandlerFunc is the callback type invoked by
// Dispatch for each send event.
type ExtDataControlSourceV1SendHandlerFunc func(ExtDataControlSourceV1SendEvent)
// SetSendHandler : sets handler for ExtDataControlSourceV1SendEvent
func (i *ExtDataControlSourceV1) SetSendHandler(f ExtDataControlSourceV1SendHandlerFunc) {
	i.sendHandler = f
}
// ExtDataControlSourceV1CancelledEvent : selection was cancelled
//
// This data source is no longer valid. The data source has been replaced
// by another data source.
//
// The client should clean up and destroy this data source.
type ExtDataControlSourceV1CancelledEvent struct{}
// ExtDataControlSourceV1CancelledHandlerFunc is the callback type invoked by
// Dispatch for the cancelled event.
type ExtDataControlSourceV1CancelledHandlerFunc func(ExtDataControlSourceV1CancelledEvent)
// SetCancelledHandler : sets handler for ExtDataControlSourceV1CancelledEvent
func (i *ExtDataControlSourceV1) SetCancelledHandler(f ExtDataControlSourceV1CancelledHandlerFunc) {
	i.cancelledHandler = f
}
// Dispatch decodes one ext_data_control_source_v1 event and invokes the
// registered handler.
//
// opcode 0 is "send" (carries a MIME type string and the transfer fd);
// opcode 1 is "cancelled" (no arguments).
func (i *ExtDataControlSourceV1) Dispatch(opcode uint32, fd int, data []byte) {
	switch opcode {
	case 0:
		if i.sendHandler == nil {
			// No handler installed: close the descriptor here so it does not
			// leak — normally the handler takes ownership of it.
			if fd != -1 {
				unix.Close(fd)
			}
			return
		}
		var e ExtDataControlSourceV1SendEvent
		l := 0
		// Wire string: 4-byte length prefix, then padded contents.
		mimeTypeLen := client.PaddedLen(int(client.Uint32(data[l : l+4])))
		l += 4
		e.MimeType = client.String(data[l : l+mimeTypeLen])
		l += mimeTypeLen
		e.Fd = fd
		i.sendHandler(e)
	case 1:
		if i.cancelledHandler == nil {
			return
		}
		var e ExtDataControlSourceV1CancelledEvent
		i.cancelledHandler(e)
	}
}
// ExtDataControlOfferV1InterfaceName is the name of the interface as it appears in the [client.Registry].
// It can be used to match the [client.RegistryGlobalEvent.Interface] in the
// [Registry.SetGlobalHandler] and can be used in [Registry.Bind] if this applies.
const ExtDataControlOfferV1InterfaceName = "ext_data_control_offer_v1"
// ExtDataControlOfferV1 : offer to transfer data
//
// A ext_data_control_offer represents a piece of data offered for transfer
// by another client (the source client). The offer describes the different
// MIME types that the data can be converted to and provides the mechanism
// for transferring the data directly from the source client.
type ExtDataControlOfferV1 struct {
	client.BaseProxy
	// offerHandler is invoked once per advertised MIME type (the "offer"
	// event), immediately after this object is introduced.
	offerHandler ExtDataControlOfferV1OfferHandlerFunc
}
// NewExtDataControlOfferV1 : offer to transfer data
//
// Allocates a fresh ext_data_control_offer_v1 proxy and registers it with
// the given connection context so the compositor can address it by object ID.
// An offer represents data offered by another (source) client, listing the
// MIME types it can be converted to.
func NewExtDataControlOfferV1(ctx *client.Context) *ExtDataControlOfferV1 {
	offer := &ExtDataControlOfferV1{}
	ctx.Register(offer)
	return offer
}
// Receive : request that the data is transferred
//
// To transfer the offered data, the client issues this request and
// indicates the MIME type it wants to receive. The transfer happens
// through the passed file descriptor (typically created with the pipe
// system call). The source client writes the data in the MIME type
// representation requested and then closes the file descriptor.
//
// The receiving client reads from the read end of the pipe until EOF and
// then closes its end, at which point the transfer is complete.
//
// This request may happen multiple times for different MIME types.
//
// mimeType: MIME type desired by receiver
// fd: file descriptor for data transfer
func (i *ExtDataControlOfferV1) Receive(mimeType string, fd int) error {
	const opcode = 0
	// Wire string: 4-byte length prefix plus NUL-terminated, padded contents.
	strLen := client.PaddedLen(len(mimeType) + 1)
	msgLen := 8 + (4 + strLen)
	buf := make([]byte, msgLen)
	client.PutUint32(buf[0:4], i.ID())
	// Header word: message length in the high 16 bits, opcode in the low 16.
	client.PutUint32(buf[4:8], uint32(msgLen<<16|opcode&0x0000ffff))
	client.PutString(buf[8:8+(4+strLen)], mimeType)
	// The descriptor travels out-of-band as SCM_RIGHTS ancillary data.
	rights := unix.UnixRights(fd)
	return i.Context().WriteMsg(buf, rights)
}
// Destroy : destroy this offer
//
// Destroys the data offer object.
func (i *ExtDataControlOfferV1) Destroy() error {
	// Mark the proxy dead locally before the request is written — presumably
	// so the dispatcher ignores late events for this ID; confirm against
	// client.BaseProxy.MarkZombie.
	defer i.MarkZombie()
	const opcode = 1
	// Fixed-size message: 4-byte object ID + 4-byte header word.
	const _reqBufLen = 8
	var _reqBuf [_reqBufLen]byte
	l := 0
	client.PutUint32(_reqBuf[l:4], i.ID())
	l += 4
	// Header word: message length in the high 16 bits, opcode in the low 16.
	client.PutUint32(_reqBuf[l:l+4], uint32(_reqBufLen<<16|opcode&0x0000ffff))
	l += 4
	err := i.Context().WriteMsg(_reqBuf[:], nil)
	return err
}
// ExtDataControlOfferV1OfferEvent : advertise offered MIME type
//
// Sent immediately after creating the ext_data_control_offer object.
// One event per offered MIME type.
type ExtDataControlOfferV1OfferEvent struct {
	// MimeType is one representation the source can produce.
	MimeType string
}
// ExtDataControlOfferV1OfferHandlerFunc is the callback type invoked by
// Dispatch for each offer event.
type ExtDataControlOfferV1OfferHandlerFunc func(ExtDataControlOfferV1OfferEvent)
// SetOfferHandler : sets handler for ExtDataControlOfferV1OfferEvent
func (i *ExtDataControlOfferV1) SetOfferHandler(f ExtDataControlOfferV1OfferHandlerFunc) {
	i.offerHandler = f
}
// Dispatch decodes one ext_data_control_offer_v1 event and invokes the
// registered handler. The only event is "offer" (opcode 0), which carries a
// single MIME-type string; fd is unused by this interface.
func (i *ExtDataControlOfferV1) Dispatch(opcode uint32, fd int, data []byte) {
	switch opcode {
	case 0:
		if i.offerHandler == nil {
			return
		}
		var e ExtDataControlOfferV1OfferEvent
		l := 0
		// Wire string: 4-byte length prefix, then padded contents.
		mimeTypeLen := client.PaddedLen(int(client.Uint32(data[l : l+4])))
		l += 4
		e.MimeType = client.String(data[l : l+mimeTypeLen])
		l += mimeTypeLen
		i.offerHandler(e)
	}
}
@@ -0,0 +1,276 @@
<?xml version="1.0" encoding="UTF-8"?>
<protocol name="ext_data_control_v1">
<copyright>
Copyright © 2018 Simon Ser
Copyright © 2019 Ivan Molodetskikh
Copyright © 2024 Neal Gompa
Permission to use, copy, modify, distribute, and sell this
software and its documentation for any purpose is hereby granted
without fee, provided that the above copyright notice appear in
all copies and that both that copyright notice and this permission
notice appear in supporting documentation, and that the name of
the copyright holders not be used in advertising or publicity
pertaining to distribution of the software without specific,
written prior permission. The copyright holders make no
representations about the suitability of this software for any
purpose. It is provided "as is" without express or implied
warranty.
THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
THIS SOFTWARE.
</copyright>
<description summary="control data devices">
This protocol allows a privileged client to control data devices. In
particular, the client will be able to manage the current selection and take
the role of a clipboard manager.
Warning! The protocol described in this file is currently in the testing
phase. Backward compatible changes may be added together with the
corresponding interface version bump. Backward incompatible changes can
only be done by creating a new major version of the extension.
</description>
<interface name="ext_data_control_manager_v1" version="1">
<description summary="manager to control data devices">
This interface is a manager that allows creating per-seat data device
controls.
</description>
<request name="create_data_source">
<description summary="create a new data source">
Create a new data source.
</description>
<arg name="id" type="new_id" interface="ext_data_control_source_v1"
summary="data source to create"/>
</request>
<request name="get_data_device">
<description summary="get a data device for a seat">
Create a data device that can be used to manage a seat's selection.
</description>
<arg name="id" type="new_id" interface="ext_data_control_device_v1"/>
<arg name="seat" type="object" interface="wl_seat"/>
</request>
<request name="destroy" type="destructor">
<description summary="destroy the manager">
All objects created by the manager will still remain valid, until their
appropriate destroy request has been called.
</description>
</request>
</interface>
<interface name="ext_data_control_device_v1" version="1">
<description summary="manage a data device for a seat">
This interface allows a client to manage a seat's selection.
When the seat is destroyed, this object becomes inert.
</description>
<request name="set_selection">
<description summary="copy data to the selection">
This request asks the compositor to set the selection to the data from
the source on behalf of the client.
The given source may not be used in any further set_selection or
set_primary_selection requests. Attempting to use a previously used
source triggers the used_source protocol error.
To unset the selection, set the source to NULL.
</description>
<arg name="source" type="object" interface="ext_data_control_source_v1"
allow-null="true"/>
</request>
<request name="destroy" type="destructor">
<description summary="destroy this data device">
Destroys the data device object.
</description>
</request>
<event name="data_offer">
<description summary="introduce a new ext_data_control_offer">
The data_offer event introduces a new ext_data_control_offer object,
which will subsequently be used in either the
ext_data_control_device.selection event (for the regular clipboard
selections) or the ext_data_control_device.primary_selection event (for
the primary clipboard selections). Immediately following the
ext_data_control_device.data_offer event, the new data_offer object
will send out ext_data_control_offer.offer events to describe the MIME
types it offers.
</description>
<arg name="id" type="new_id" interface="ext_data_control_offer_v1"/>
</event>
<event name="selection">
<description summary="advertise new selection">
The selection event is sent out to notify the client of a new
ext_data_control_offer for the selection for this device. The
ext_data_control_device.data_offer and the ext_data_control_offer.offer
events are sent out immediately before this event to introduce the data
offer object. The selection event is sent to a client when a new
selection is set. The ext_data_control_offer is valid until a new
ext_data_control_offer or NULL is received. The client must destroy the
previous selection ext_data_control_offer, if any, upon receiving this
event. Regardless, the previous selection will be ignored once a new
selection ext_data_control_offer is received.
The first selection event is sent upon binding the
ext_data_control_device object.
</description>
<arg name="id" type="object" interface="ext_data_control_offer_v1"
allow-null="true"/>
</event>
<event name="finished">
<description summary="this data control is no longer valid">
This data control object is no longer valid and should be destroyed by
the client.
</description>
</event>
<event name="primary_selection">
<description summary="advertise new primary selection">
The primary_selection event is sent out to notify the client of a new
ext_data_control_offer for the primary selection for this device. The
ext_data_control_device.data_offer and the ext_data_control_offer.offer
events are sent out immediately before this event to introduce the data
offer object. The primary_selection event is sent to a client when a
new primary selection is set. The ext_data_control_offer is valid until
a new ext_data_control_offer or NULL is received. The client must
destroy the previous primary selection ext_data_control_offer, if any,
upon receiving this event. Regardless, the previous primary selection
will be ignored once a new primary selection ext_data_control_offer is
received.
If the compositor supports primary selection, the first
primary_selection event is sent upon binding the
ext_data_control_device object.
</description>
<arg name="id" type="object" interface="ext_data_control_offer_v1"
allow-null="true"/>
</event>
<request name="set_primary_selection">
<description summary="copy data to the primary selection">
This request asks the compositor to set the primary selection to the
data from the source on behalf of the client.
The given source may not be used in any further set_selection or
set_primary_selection requests. Attempting to use a previously used
source triggers the used_source protocol error.
To unset the primary selection, set the source to NULL.
The compositor will ignore this request if it does not support primary
selection.
</description>
<arg name="source" type="object" interface="ext_data_control_source_v1"
allow-null="true"/>
</request>
<enum name="error">
<entry name="used_source" value="1"
summary="source given to set_selection or set_primary_selection was already used before"/>
</enum>
</interface>
<interface name="ext_data_control_source_v1" version="1">
<description summary="offer to transfer data">
The ext_data_control_source object is the source side of a
ext_data_control_offer. It is created by the source client in a data
transfer and provides a way to describe the offered data and a way to
respond to requests to transfer the data.
</description>
<enum name="error">
<entry name="invalid_offer" value="1"
summary="offer sent after ext_data_control_device.set_selection"/>
</enum>
<request name="offer">
<description summary="add an offered MIME type">
This request adds a MIME type to the set of MIME types advertised to
targets. Can be called several times to offer multiple types.
Calling this after ext_data_control_device.set_selection is a protocol
error.
</description>
<arg name="mime_type" type="string"
summary="MIME type offered by the data source"/>
</request>
<request name="destroy" type="destructor">
<description summary="destroy this source">
Destroys the data source object.
</description>
</request>
<event name="send">
<description summary="send the data">
Request for data from the client. Send the data as the specified MIME
type over the passed file descriptor, then close it.
</description>
<arg name="mime_type" type="string" summary="MIME type for the data"/>
<arg name="fd" type="fd" summary="file descriptor for the data"/>
</event>
<event name="cancelled">
<description summary="selection was cancelled">
This data source is no longer valid. The data source has been replaced
by another data source.
The client should clean up and destroy this data source.
</description>
</event>
</interface>
<interface name="ext_data_control_offer_v1" version="1">
<description summary="offer to transfer data">
A ext_data_control_offer represents a piece of data offered for transfer
by another client (the source client). The offer describes the different
MIME types that the data can be converted to and provides the mechanism
for transferring the data directly from the source client.
</description>
<request name="receive">
<description summary="request that the data is transferred">
To transfer the offered data, the client issues this request and
indicates the MIME type it wants to receive. The transfer happens
through the passed file descriptor (typically created with the pipe
system call). The source client writes the data in the MIME type
representation requested and then closes the file descriptor.
The receiving client reads from the read end of the pipe until EOF and
then closes its end, at which point the transfer is complete.
This request may happen multiple times for different MIME types.
</description>
<arg name="mime_type" type="string"
summary="MIME type desired by receiver"/>
<arg name="fd" type="fd" summary="file descriptor for data transfer"/>
</request>
<request name="destroy" type="destructor">
<description summary="destroy this offer">
Destroys the data offer object.
</description>
</request>
<event name="offer">
<description summary="advertise offered MIME type">
Sent immediately after creating the ext_data_control_offer object.
One event per offered MIME type.
</description>
<arg name="mime_type" type="string" summary="offered MIME type"/>
</event>
</interface>
</protocol>
+7 -1
View File
@@ -12,6 +12,7 @@ import (
"strings" "strings"
"time" "time"
"github.com/AvengeMedia/DankMaterialShell/core/internal/log"
"github.com/AvengeMedia/DankMaterialShell/core/internal/utils" "github.com/AvengeMedia/DankMaterialShell/core/internal/utils"
) )
@@ -119,7 +120,12 @@ func GetOutputDir() string {
} }
func getXDGPicturesDir() string { func getXDGPicturesDir() string {
userDirsFile := filepath.Join(utils.XDGConfigHome(), "user-dirs.dirs") userConfigDir, err := os.UserConfigDir()
if err != nil {
log.Error("failed to get user config dir", "err", err)
return ""
}
userDirsFile := filepath.Join(userConfigDir, "user-dirs.dirs")
data, err := os.ReadFile(userDirsFile) data, err := os.ReadFile(userDirsFile)
if err != nil { if err != nil {
return "" return ""
+7 -2
View File
@@ -7,7 +7,7 @@ import (
"path/filepath" "path/filepath"
"strings" "strings"
"github.com/AvengeMedia/DankMaterialShell/core/internal/utils" "github.com/AvengeMedia/DankMaterialShell/core/internal/log"
) )
type ThemeColors struct { type ThemeColors struct {
@@ -74,7 +74,12 @@ func loadColorsFile() *ColorScheme {
} }
func getColorsFilePath() string { func getColorsFilePath() string {
return filepath.Join(utils.XDGCacheHome(), "DankMaterialShell", "dms-colors.json") cacheDir, err := os.UserCacheDir()
if err != nil {
log.Error("Failed to get user cache dir", "err", err)
return ""
}
return filepath.Join(cacheDir, "DankMaterialShell", "dms-colors.json")
} }
func isLightMode() bool { func isLightMode() bool {
+238
View File
@@ -0,0 +1,238 @@
package clipboard
import (
"encoding/json"
"fmt"
"net"
"github.com/AvengeMedia/DankMaterialShell/core/internal/server/models"
"github.com/AvengeMedia/DankMaterialShell/core/internal/server/params"
)
// HandleRequest routes a clipboard.* IPC request to its handler and writes
// the response — or an error for unknown methods — back over conn.
func HandleRequest(conn net.Conn, req models.Request, m *Manager) {
	handlers := map[string]func(net.Conn, models.Request, *Manager){
		"clipboard.getState":     handleGetState,
		"clipboard.getHistory":   handleGetHistory,
		"clipboard.getEntry":     handleGetEntry,
		"clipboard.deleteEntry":  handleDeleteEntry,
		"clipboard.clearHistory": handleClearHistory,
		"clipboard.copy":         handleCopy,
		"clipboard.copyEntry":    handleCopyEntry,
		"clipboard.paste":        handlePaste,
		"clipboard.subscribe":    handleSubscribe,
		"clipboard.search":       handleSearch,
		"clipboard.getConfig":    handleGetConfig,
		"clipboard.setConfig":    handleSetConfig,
		"clipboard.store":        handleStore,
	}
	handler, known := handlers[req.Method]
	if !known {
		models.RespondError(conn, req.ID, "unknown method: "+req.Method)
		return
	}
	handler(conn, req, m)
}
// handleGetState responds with the manager's current clipboard state.
func handleGetState(conn net.Conn, req models.Request, m *Manager) {
	models.Respond(conn, req.ID, m.GetState())
}
// handleGetHistory responds with the history list, stripping the raw Data
// payload from every entry so the listing stays small; clients fetch full
// bodies via clipboard.getEntry.
// NOTE(review): this mutates the entries in the slice GetHistory returned —
// assumes GetHistory hands back a copy rather than the manager's backing
// slice; confirm in Manager.GetHistory.
func handleGetHistory(conn net.Conn, req models.Request, m *Manager) {
	history := m.GetHistory()
	for i := range history {
		history[i].Data = nil
	}
	models.Respond(conn, req.ID, history)
}
// handleGetEntry responds with a single history entry — including its Data
// payload — looked up by its numeric "id" parameter.
func handleGetEntry(conn net.Conn, req models.Request, m *Manager) {
	id, err := params.Int(req.Params, "id")
	if err != nil {
		models.RespondError(conn, req.ID, err.Error())
		return
	}
	// Reject negative ids explicitly: the uint64 conversion below would
	// otherwise wrap them into huge, nonsensical entry ids.
	if id < 0 {
		models.RespondError(conn, req.ID, "id must be non-negative")
		return
	}
	entry, err := m.GetEntry(uint64(id))
	if err != nil {
		models.RespondError(conn, req.ID, err.Error())
		return
	}
	models.Respond(conn, req.ID, entry)
}
// handleDeleteEntry removes one history entry by its numeric "id" parameter
// and responds with a success message.
func handleDeleteEntry(conn net.Conn, req models.Request, m *Manager) {
	id, err := params.Int(req.Params, "id")
	if err != nil {
		models.RespondError(conn, req.ID, err.Error())
		return
	}
	// Reject negative ids explicitly: the uint64 conversion below would
	// otherwise wrap them into huge, nonsensical entry ids.
	if id < 0 {
		models.RespondError(conn, req.ID, "id must be non-negative")
		return
	}
	if err := m.DeleteEntry(uint64(id)); err != nil {
		models.RespondError(conn, req.ID, err.Error())
		return
	}
	models.Respond(conn, req.ID, models.SuccessResult{Success: true, Message: "entry deleted"})
}
// handleClearHistory wipes the entire clipboard history and responds with a
// success message.
func handleClearHistory(conn net.Conn, req models.Request, m *Manager) {
	m.ClearHistory()
	models.Respond(conn, req.ID, models.SuccessResult{Success: true, Message: "history cleared"})
}
// handleCopy places the required "text" parameter onto the clipboard and
// responds with a success message.
func handleCopy(conn net.Conn, req models.Request, m *Manager) {
	text, err := params.String(req.Params, "text")
	if err != nil {
		models.RespondError(conn, req.ID, err.Error())
		return
	}
	if err := m.CopyText(text); err != nil {
		models.RespondError(conn, req.ID, err.Error())
		return
	}
	models.Respond(conn, req.ID, models.SuccessResult{Success: true, Message: "copied to clipboard"})
}
// handleCopyEntry re-copies an existing history entry (looked up by its
// numeric "id" parameter) onto the clipboard with its original MIME type.
func handleCopyEntry(conn net.Conn, req models.Request, m *Manager) {
	id, err := params.Int(req.Params, "id")
	if err != nil {
		models.RespondError(conn, req.ID, err.Error())
		return
	}
	// Reject negative ids explicitly: the uint64 conversion below would
	// otherwise wrap them into huge, nonsensical entry ids.
	if id < 0 {
		models.RespondError(conn, req.ID, "id must be non-negative")
		return
	}
	entry, err := m.GetEntry(uint64(id))
	if err != nil {
		models.RespondError(conn, req.ID, err.Error())
		return
	}
	if err := m.SetClipboard(entry.Data, entry.MimeType); err != nil {
		models.RespondError(conn, req.ID, err.Error())
		return
	}
	models.Respond(conn, req.ID, models.SuccessResult{Success: true, Message: "copied to clipboard"})
}
// handlePaste reads the current clipboard contents as text and responds with
// it under the "text" key.
func handlePaste(conn net.Conn, req models.Request, m *Manager) {
	text, err := m.PasteText()
	if err != nil {
		models.RespondError(conn, req.ID, err.Error())
		return
	}
	models.Respond(conn, req.ID, map[string]string{"text": text})
}
// handleSubscribe streams clipboard state updates over conn: the current
// state is sent immediately, then one message per change, until the
// subscriber channel is closed by the manager or a write to conn fails.
// The subscription is torn down on return.
func handleSubscribe(conn net.Conn, req models.Request, m *Manager) {
	clientID := fmt.Sprintf("clipboard-%d", req.ID)
	ch := m.Subscribe(clientID)
	defer m.Unsubscribe(clientID)
	// Reuse a single encoder for the whole stream instead of allocating a
	// fresh one per message.
	enc := json.NewEncoder(conn)
	initialState := m.GetState()
	if err := enc.Encode(models.Response[State]{
		ID:     req.ID,
		Result: &initialState,
	}); err != nil {
		return
	}
	for state := range ch {
		if err := enc.Encode(models.Response[State]{
			ID:     req.ID,
			Result: &state,
		}); err != nil {
			return
		}
	}
}
// handleSearch runs a filtered history query and responds with the results.
// Optional parameters: "query" and "mimeType" (strings), "limit"/"offset"
// (ints, defaulting to 50/0), "isImage" (bool), and "before"/"after"
// (numeric timestamps). The raw type assertions below mirror how JSON
// decodes into map[string]any: booleans arrive as bool, numbers as float64.
func handleSearch(conn net.Conn, req models.Request, m *Manager) {
	p := SearchParams{
		Query:    params.StringOpt(req.Params, "query", ""),
		MimeType: params.StringOpt(req.Params, "mimeType", ""),
		Limit:    params.IntOpt(req.Params, "limit", 50),
		Offset:   params.IntOpt(req.Params, "offset", 0),
	}
	if img, ok := req.Params["isImage"].(bool); ok {
		p.IsImage = &img
	}
	if b, ok := req.Params["before"].(float64); ok {
		v := int64(b)
		p.Before = &v
	}
	if a, ok := req.Params["after"].(float64); ok {
		v := int64(a)
		p.After = &v
	}
	models.Respond(conn, req.ID, m.Search(p))
}
// handleGetConfig responds with the manager's current configuration.
func handleGetConfig(conn net.Conn, req models.Request, m *Manager) {
	models.Respond(conn, req.ID, m.GetConfig())
}
// handleSetConfig applies a partial configuration update: it starts from the
// current config and overwrites only the fields present in req.Params, so
// omitted fields keep their existing values. Boolean fields are read via
// direct type assertion (JSON decodes them as bool); numeric fields go
// through params.IntOpt with the current value as fallback.
func handleSetConfig(conn net.Conn, req models.Request, m *Manager) {
	cfg := m.GetConfig()
	if _, ok := req.Params["maxHistory"]; ok {
		cfg.MaxHistory = params.IntOpt(req.Params, "maxHistory", cfg.MaxHistory)
	}
	if _, ok := req.Params["maxEntrySize"]; ok {
		cfg.MaxEntrySize = int64(params.IntOpt(req.Params, "maxEntrySize", int(cfg.MaxEntrySize)))
	}
	if _, ok := req.Params["autoClearDays"]; ok {
		cfg.AutoClearDays = params.IntOpt(req.Params, "autoClearDays", cfg.AutoClearDays)
	}
	if v, ok := req.Params["clearAtStartup"].(bool); ok {
		cfg.ClearAtStartup = v
	}
	if v, ok := req.Params["disabled"].(bool); ok {
		cfg.Disabled = v
	}
	if v, ok := req.Params["disableHistory"].(bool); ok {
		cfg.DisableHistory = v
	}
	if v, ok := req.Params["disablePersist"].(bool); ok {
		cfg.DisablePersist = v
	}
	if err := m.SetConfig(cfg); err != nil {
		models.RespondError(conn, req.ID, err.Error())
		return
	}
	models.Respond(conn, req.ID, models.SuccessResult{Success: true, Message: "config updated"})
}
// handleStore writes the required "data" parameter into the clipboard
// history under the optional "mimeType" (defaulting to UTF-8 plain text)
// and responds with a success message.
// NOTE(review): data crosses the IPC boundary as a string — presumably fine
// for text; confirm how binary payloads are expected to arrive here.
func handleStore(conn net.Conn, req models.Request, m *Manager) {
	data, err := params.String(req.Params, "data")
	if err != nil {
		models.RespondError(conn, req.ID, err.Error())
		return
	}
	mimeType := params.StringOpt(req.Params, "mimeType", "text/plain;charset=utf-8")
	if err := m.StoreData([]byte(data), mimeType); err != nil {
		models.RespondError(conn, req.ID, err.Error())
		return
	}
	models.Respond(conn, req.ID, models.SuccessResult{Success: true, Message: "stored"})
}
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,531 @@
package clipboard
import (
"sync"
"sync/atomic"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
mocks_wlcontext "github.com/AvengeMedia/DankMaterialShell/core/internal/mocks/wlcontext"
)
// TestEncodeDecodeEntry_Roundtrip checks that a plain-text entry survives an
// encodeEntry/decodeEntry round trip field-for-field. Timestamps are
// compared at second precision (Unix), matching the Truncate above.
func TestEncodeDecodeEntry_Roundtrip(t *testing.T) {
	original := Entry{
		ID:        12345,
		Data:      []byte("hello world"),
		MimeType:  "text/plain;charset=utf-8",
		Preview:   "hello world",
		Size:      11,
		Timestamp: time.Now().Truncate(time.Second),
		IsImage:   false,
	}
	encoded, err := encodeEntry(original)
	assert.NoError(t, err)
	decoded, err := decodeEntry(encoded)
	assert.NoError(t, err)
	assert.Equal(t, original.ID, decoded.ID)
	assert.Equal(t, original.Data, decoded.Data)
	assert.Equal(t, original.MimeType, decoded.MimeType)
	assert.Equal(t, original.Preview, decoded.Preview)
	assert.Equal(t, original.Size, decoded.Size)
	assert.Equal(t, original.Timestamp.Unix(), decoded.Timestamp.Unix())
	assert.Equal(t, original.IsImage, decoded.IsImage)
}
// TestEncodeDecodeEntry_Image checks the round trip for a binary image entry
// (PNG magic bytes) and that the IsImage flag and preview are preserved.
func TestEncodeDecodeEntry_Image(t *testing.T) {
	original := Entry{
		ID:        99999,
		Data:      []byte{0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A},
		MimeType:  "image/png",
		Preview:   "[[ image 8 B png 100x100 ]]",
		Size:      8,
		Timestamp: time.Now().Truncate(time.Second),
		IsImage:   true,
	}
	encoded, err := encodeEntry(original)
	assert.NoError(t, err)
	decoded, err := decodeEntry(encoded)
	assert.NoError(t, err)
	assert.Equal(t, original.ID, decoded.ID)
	assert.Equal(t, original.Data, decoded.Data)
	assert.True(t, decoded.IsImage)
	assert.Equal(t, original.Preview, decoded.Preview)
}
// TestEncodeDecodeEntry_EmptyData checks the zero-payload edge case: an
// empty Data slice must decode back to empty rather than erroring.
func TestEncodeDecodeEntry_EmptyData(t *testing.T) {
	original := Entry{
		ID:        1,
		Data:      []byte{},
		MimeType:  "text/plain",
		Preview:   "",
		Size:      0,
		Timestamp: time.Now().Truncate(time.Second),
		IsImage:   false,
	}
	encoded, err := encodeEntry(original)
	assert.NoError(t, err)
	decoded, err := decodeEntry(encoded)
	assert.NoError(t, err)
	assert.Equal(t, original.ID, decoded.ID)
	assert.Empty(t, decoded.Data)
}
// TestEncodeDecodeEntry_LargeData checks that a 100 KB payload with all byte
// values round-trips intact (guards against length-prefix or buffer bugs).
func TestEncodeDecodeEntry_LargeData(t *testing.T) {
	largeData := make([]byte, 100000)
	for i := range largeData {
		largeData[i] = byte(i % 256)
	}
	original := Entry{
		ID:        777,
		Data:      largeData,
		MimeType:  "application/octet-stream",
		Preview:   "binary data...",
		Size:      len(largeData),
		Timestamp: time.Now().Truncate(time.Second),
		IsImage:   false,
	}
	encoded, err := encodeEntry(original)
	assert.NoError(t, err)
	decoded, err := decodeEntry(encoded)
	assert.NoError(t, err)
	assert.Equal(t, original.Data, decoded.Data)
	assert.Equal(t, original.Size, decoded.Size)
}
// stateEqual(nil, nil) is deliberately false: a nil state is never treated
// as settled, forcing the notifier to push a fresh snapshot.
func TestStateEqual_BothNil(t *testing.T) {
	assert.False(t, stateEqual(nil, nil))
}

// A nil on either side never equals a concrete state.
func TestStateEqual_OneNil(t *testing.T) {
	s := &State{Enabled: true}
	assert.False(t, stateEqual(s, nil))
	assert.False(t, stateEqual(nil, s))
}

// States differing only in the Enabled flag are unequal.
func TestStateEqual_EnabledDiffers(t *testing.T) {
	a := &State{Enabled: true, History: []Entry{}}
	b := &State{Enabled: false, History: []Entry{}}
	assert.False(t, stateEqual(a, b))
}

// States with different history lengths are unequal.
func TestStateEqual_HistoryLengthDiffers(t *testing.T) {
	a := &State{Enabled: true, History: []Entry{{ID: 1}}}
	b := &State{Enabled: true, History: []Entry{}}
	assert.False(t, stateEqual(a, b))
}

// stateEqual compares only Enabled and the history LENGTH: the entry IDs
// differ here yet the states compare equal. This is intentional — entry
// contents are immutable once appended, so length suffices for change
// detection.
func TestStateEqual_BothEqual(t *testing.T) {
	a := &State{Enabled: true, History: []Entry{{ID: 1}, {ID: 2}}}
	b := &State{Enabled: true, History: []Entry{{ID: 3}, {ID: 4}}}
	assert.True(t, stateEqual(a, b))
}
// TestManager_ConcurrentSubscriberAccess subscribes and unsubscribes from
// 20 goroutines at once. It asserts nothing beyond non-nil channels; its
// value is under `go test -race`, where it catches subMutex regressions.
func TestManager_ConcurrentSubscriberAccess(t *testing.T) {
	m := &Manager{
		subscribers: make(map[string]chan State),
		dirty:       make(chan struct{}, 1),
	}
	var wg sync.WaitGroup
	const goroutines = 20
	for i := 0; i < goroutines; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			subID := string(rune('a' + id))
			ch := m.Subscribe(subID)
			assert.NotNil(t, ch)
			time.Sleep(time.Millisecond)
			m.Unsubscribe(subID)
		}(i)
	}
	wg.Wait()
}

// TestManager_ConcurrentGetState hammers GetState from readers while
// writers swap m.state under stateMutex — a race-detector test for the
// snapshot-copy pattern in GetState.
func TestManager_ConcurrentGetState(t *testing.T) {
	m := &Manager{
		state: &State{
			Enabled: true,
			History: []Entry{{ID: 1}, {ID: 2}},
		},
	}
	var wg sync.WaitGroup
	const goroutines = 50
	const iterations = 100
	for i := 0; i < goroutines/2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				s := m.GetState()
				_ = s.Enabled
				_ = len(s.History)
			}
		}()
	}
	for i := 0; i < goroutines/2; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				m.stateMutex.Lock()
				m.state = &State{
					Enabled: j%2 == 0,
					History: []Entry{{ID: uint64(j)}},
				}
				m.stateMutex.Unlock()
			}
		}(i)
	}
	wg.Wait()
}
// TestManager_ConcurrentConfigAccess races getConfig readers against
// direct writers holding configMutex; meaningful under -race.
func TestManager_ConcurrentConfigAccess(t *testing.T) {
	m := &Manager{
		config: DefaultConfig(),
	}
	var wg sync.WaitGroup
	const goroutines = 30
	const iterations = 100
	for i := 0; i < goroutines/2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				cfg := m.getConfig()
				_ = cfg.MaxHistory
				_ = cfg.MaxEntrySize
			}
		}()
	}
	for i := 0; i < goroutines/2; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				m.configMutex.Lock()
				m.config.MaxHistory = 50 + j
				m.config.MaxEntrySize = int64(1024 * j)
				m.configMutex.Unlock()
			}
		}(i)
	}
	wg.Wait()
}

// TestManager_NotifySubscribersNonBlocking proves repeated notifications
// coalesce: the dirty channel has capacity 1, so 10 calls leave exactly
// one pending signal and never block.
func TestManager_NotifySubscribersNonBlocking(t *testing.T) {
	m := &Manager{
		dirty: make(chan struct{}, 1),
	}
	for i := 0; i < 10; i++ {
		m.notifySubscribers()
	}
	assert.Len(t, m.dirty, 1)
}

// TestManager_ConcurrentOfferAccess exercises the offer registry maps
// (store / read / delete) from 20 goroutines under offerMutex; -race test.
func TestManager_ConcurrentOfferAccess(t *testing.T) {
	m := &Manager{
		offerMimeTypes: make(map[any][]string),
		offerRegistry:  make(map[uint32]any),
	}
	var wg sync.WaitGroup
	const goroutines = 20
	const iterations = 50
	for i := 0; i < goroutines; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			key := uint32(id)
			for j := 0; j < iterations; j++ {
				m.offerMutex.Lock()
				m.offerRegistry[key] = struct{}{}
				m.offerMimeTypes[key] = []string{"text/plain"}
				m.offerMutex.Unlock()
				m.offerMutex.RLock()
				_ = m.offerRegistry[key]
				_ = m.offerMimeTypes[key]
				m.offerMutex.RUnlock()
				m.offerMutex.Lock()
				delete(m.offerRegistry, key)
				delete(m.offerMimeTypes, key)
				m.offerMutex.Unlock()
			}
		}(i)
	}
	wg.Wait()
}
// TestManager_ConcurrentPersistAccess races readers of the persist
// snapshot (data + mime list) against writers replacing both under
// persistMutex; -race test.
func TestManager_ConcurrentPersistAccess(t *testing.T) {
	m := &Manager{
		persistData:      make(map[string][]byte),
		persistMimeTypes: []string{},
	}
	var wg sync.WaitGroup
	const goroutines = 20
	const iterations = 50
	for i := 0; i < goroutines/2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				m.persistMutex.RLock()
				_ = m.persistData
				_ = m.persistMimeTypes
				m.persistMutex.RUnlock()
			}
		}()
	}
	for i := 0; i < goroutines/2; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				m.persistMutex.Lock()
				m.persistMimeTypes = []string{"text/plain", "text/html"}
				m.persistData = map[string][]byte{
					"text/plain": []byte("test"),
				}
				m.persistMutex.Unlock()
			}
		}(i)
	}
	wg.Wait()
}

// TestManager_ConcurrentOwnerAccess races reads and writes of the isOwner
// flag under ownerLock (a plain Mutex, so "readers" also take Lock);
// -race test.
func TestManager_ConcurrentOwnerAccess(t *testing.T) {
	m := &Manager{}
	var wg sync.WaitGroup
	const goroutines = 30
	const iterations = 100
	for i := 0; i < goroutines/2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				m.ownerLock.Lock()
				_ = m.isOwner
				m.ownerLock.Unlock()
			}
		}()
	}
	for i := 0; i < goroutines/2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				m.ownerLock.Lock()
				m.isOwner = j%2 == 0
				m.ownerLock.Unlock()
			}
		}()
	}
	wg.Wait()
}
// TestItob checks big-endian encoding of uint64 keys across boundary
// values: zero, smallest non-zero, a carry into the second byte, and the
// all-ones maximum.
func TestItob(t *testing.T) {
	cases := map[uint64][]byte{
		0:                  {0, 0, 0, 0, 0, 0, 0, 0},
		1:                  {0, 0, 0, 0, 0, 0, 0, 1},
		256:                {0, 0, 0, 0, 0, 0, 1, 0},
		0xFFFFFFFFFFFFFFFF: {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
	}
	for in, want := range cases {
		assert.Equal(t, want, itob(in))
	}
}
// TestSizeStr pins human-readable size formatting at the binary-unit
// boundaries (B / KiB / MiB).
func TestSizeStr(t *testing.T) {
	tests := []struct {
		input    int
		expected string
	}{
		{0, "0 B"},
		{100, "100 B"},
		{1024, "1 KiB"},
		{2048, "2 KiB"},
		{1048576, "1 MiB"},
		{5242880, "5 MiB"},
	}
	for _, tt := range tests {
		result := sizeStr(tt.input)
		assert.Equal(t, tt.expected, result)
	}
}
// TestSelectMimeType pins the mime-type preference order: text/plain
// variants win over text/html, images are accepted when no text is
// offered, and unsupported-only offers yield "".
func TestSelectMimeType(t *testing.T) {
	m := &Manager{}
	tests := []struct {
		mimes    []string
		expected string
	}{
		{[]string{"text/plain;charset=utf-8", "text/html"}, "text/plain;charset=utf-8"},
		{[]string{"text/html", "text/plain"}, "text/plain"},
		{[]string{"application/json", "image/png"}, "image/png"},
		{[]string{"application/json", "application/xml"}, ""},
		{[]string{}, ""},
	}
	for _, tt := range tests {
		result := m.selectMimeType(tt.mimes)
		assert.Equal(t, tt.expected, result)
	}
}

// TestIsImageMimeType checks the image/* prefix classification for common
// image and non-image types.
func TestIsImageMimeType(t *testing.T) {
	m := &Manager{}
	assert.True(t, m.isImageMimeType("image/png"))
	assert.True(t, m.isImageMimeType("image/jpeg"))
	assert.True(t, m.isImageMimeType("image/gif"))
	assert.False(t, m.isImageMimeType("text/plain"))
	assert.False(t, m.isImageMimeType("application/json"))
}
// TestTextPreview verifies preview generation: surrounding whitespace is
// trimmed, short text passes through unchanged, and long text is
// truncated with a "…" ellipsis (truncated length still exceeds 100).
func TestTextPreview(t *testing.T) {
	m := &Manager{}
	short := m.textPreview([]byte("hello world"))
	assert.Equal(t, "hello world", short)
	withWhitespace := m.textPreview([]byte("  hello world  "))
	assert.Equal(t, "hello world", withWhitespace)
	longText := make([]byte, 200)
	for i := range longText {
		longText[i] = 'a'
	}
	preview := m.textPreview(longText)
	assert.True(t, len(preview) > 100)
	assert.Contains(t, preview, "…")
}
// TestDefaultConfig pins the documented defaults — notably
// DisablePersist == true, since clipboard persistence is opt-in.
func TestDefaultConfig(t *testing.T) {
	cfg := DefaultConfig()
	assert.Equal(t, 100, cfg.MaxHistory)
	assert.Equal(t, int64(5*1024*1024), cfg.MaxEntrySize)
	assert.Equal(t, 0, cfg.AutoClearDays)
	assert.False(t, cfg.ClearAtStartup)
	assert.False(t, cfg.Disabled)
	assert.False(t, cfg.DisableHistory)
	assert.True(t, cfg.DisablePersist)
}
// TestManager_PostDelegatesToWlContext verifies post forwards the closure
// to the Wayland context's Post exactly once and that the closure runs
// when the mock invokes it.
func TestManager_PostDelegatesToWlContext(t *testing.T) {
	mockCtx := mocks_wlcontext.NewMockWaylandContext(t)
	var called atomic.Bool
	mockCtx.EXPECT().Post(mock.AnythingOfType("func()")).Run(func(fn func()) {
		called.Store(true)
		fn()
	}).Once()
	m := &Manager{
		wlCtx: mockCtx,
	}
	executed := false
	m.post(func() {
		executed = true
	})
	assert.True(t, called.Load())
	assert.True(t, executed)
}

// TestManager_PostExecutesFunctionViaContext captures (without running)
// each posted closure; only the LAST capture is invoked, so counter ends
// at exactly 100 — proving post itself never executes the closure inline.
func TestManager_PostExecutesFunctionViaContext(t *testing.T) {
	mockCtx := mocks_wlcontext.NewMockWaylandContext(t)
	var capturedFn func()
	mockCtx.EXPECT().Post(mock.AnythingOfType("func()")).Run(func(fn func()) {
		capturedFn = fn
	}).Times(3)
	m := &Manager{
		wlCtx: mockCtx,
	}
	counter := 0
	m.post(func() { counter++ })
	m.post(func() { counter += 10 })
	m.post(func() { counter += 100 })
	assert.NotNil(t, capturedFn)
	capturedFn()
	assert.Equal(t, 100, counter)
}

// TestManager_ConcurrentPostWithMock posts from 10 goroutines and counts
// mock invocations; verifies post is safe to call concurrently and every
// call reaches the context exactly once (10 * 10 = 100).
func TestManager_ConcurrentPostWithMock(t *testing.T) {
	mockCtx := mocks_wlcontext.NewMockWaylandContext(t)
	var postCount atomic.Int32
	mockCtx.EXPECT().Post(mock.AnythingOfType("func()")).Run(func(fn func()) {
		postCount.Add(1)
	}).Times(100)
	m := &Manager{
		wlCtx: mockCtx,
	}
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 10; j++ {
				m.post(func() {})
			}
		}()
	}
	wg.Wait()
	assert.Equal(t, int32(100), postCount.Load())
}
+192
View File
@@ -0,0 +1,192 @@
package clipboard
import (
"encoding/json"
"os"
"path/filepath"
"sync"
"time"
bolt "go.etcd.io/bbolt"
"github.com/AvengeMedia/DankMaterialShell/core/internal/server/wlcontext"
wlclient "github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/wayland/client"
)
// Config holds user-tunable clipboard manager settings, persisted as JSON
// in clsettings.json (see getConfigPath).
type Config struct {
	MaxHistory     int   `json:"maxHistory"`     // max entries kept in history
	MaxEntrySize   int64 `json:"maxEntrySize"`   // per-entry size cap in bytes
	AutoClearDays  int   `json:"autoClearDays"`  // 0 disables age-based clearing
	ClearAtStartup bool  `json:"clearAtStartup"` // wipe history on startup
	Disabled       bool  `json:"disabled"`       // disable the manager entirely
	DisableHistory bool  `json:"disableHistory"` // track current selection only
	DisablePersist bool  `json:"disablePersist"` // don't persist clipboard contents
}
// DefaultConfig returns the baseline clipboard configuration: 100 history
// entries, a 5 MiB per-entry cap, no automatic clearing, and persistence
// disabled (persisting clipboard contents is opt-in).
func DefaultConfig() Config {
	var cfg Config
	cfg.MaxHistory = 100
	cfg.MaxEntrySize = 5 * 1024 * 1024
	cfg.AutoClearDays = 0
	cfg.ClearAtStartup = false
	cfg.DisablePersist = true
	return cfg
}
func getConfigPath() (string, error) {
configDir, err := os.UserConfigDir()
if err != nil {
return "", err
}
return filepath.Join(configDir, "DankMaterialShell", "clsettings.json"), nil
}
// LoadConfig reads clsettings.json and returns the parsed configuration.
// Every failure mode — unresolvable path, unreadable file, invalid JSON —
// falls back to DefaultConfig, so callers always receive a usable value.
func LoadConfig() Config {
	path, err := getConfigPath()
	if err != nil {
		return DefaultConfig()
	}
	raw, err := os.ReadFile(path)
	if err != nil {
		return DefaultConfig()
	}
	// Unmarshal on top of the defaults so omitted keys keep their
	// default values; a parse error discards any partial mutation.
	cfg := DefaultConfig()
	if json.Unmarshal(raw, &cfg) != nil {
		return DefaultConfig()
	}
	return cfg
}
// SaveConfig serialises cfg as indented JSON and writes it to the
// settings path, creating the parent directory if needed. Returns the
// first error encountered.
func SaveConfig(cfg Config) error {
	path, err := getConfigPath()
	if err != nil {
		return err
	}
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return err
	}
	payload, err := json.MarshalIndent(cfg, "", " ")
	if err != nil {
		return err
	}
	return os.WriteFile(path, payload, 0644)
}
// SearchParams narrows a clipboard-history search; zero values mean
// "no filter".
type SearchParams struct {
	Query    string `json:"query"`
	MimeType string `json:"mimeType"`
	IsImage  *bool  `json:"isImage"` // nil matches both text and images
	Limit    int    `json:"limit"`
	Offset   int    `json:"offset"`
	Before   *int64 `json:"before"` // presumably a unix timestamp bound — TODO confirm against handler
	After    *int64 `json:"after"`  // presumably a unix timestamp bound — TODO confirm against handler
}

// SearchResult is one page of search output plus pagination metadata.
type SearchResult struct {
	Entries []Entry `json:"entries"`
	Total   int     `json:"total"`
	HasMore bool    `json:"hasMore"`
}

// Entry is a single clipboard history item.
type Entry struct {
	ID        uint64    `json:"id"`
	Data      []byte    `json:"data,omitempty"` // raw payload; omitted in list views
	MimeType  string    `json:"mimeType"`
	Preview   string    `json:"preview"` // short display string (see textPreview)
	Size      int       `json:"size"`    // payload size in bytes
	Timestamp time.Time `json:"timestamp"`
	IsImage   bool      `json:"isImage"`
}

// State is the snapshot pushed to subscribers: whether tracking is
// enabled, the history list, and the current selection (if any).
type State struct {
	Enabled bool    `json:"enabled"`
	History []Entry `json:"history"`
	Current *Entry  `json:"current,omitempty"`
}
// Manager owns the Wayland clipboard integration: it watches selection
// offers, records history into a bbolt database, and fans state changes
// out to subscribers. Fields are grouped by the mutex that guards them.
type Manager struct {
	// configuration (guarded by configMutex)
	config      Config
	configMutex sync.RWMutex
	configPath  string

	// Wayland plumbing. dataControlMgr/dataDevice/currentOffer/
	// currentSource are `any` — presumably to support multiple
	// data-control protocol variants; confirm against the setup code.
	display        wlclient.WaylandDisplay
	wlCtx          wlcontext.WaylandContext
	registry       *wlclient.Registry
	dataControlMgr any
	seat           *wlclient.Seat
	dataDevice     any
	currentOffer   any
	currentSource  any
	seatName       uint32
	mimeTypes      []string

	// incoming offers (guarded by offerMutex)
	offerMimeTypes map[any][]string
	offerMutex     sync.RWMutex
	offerRegistry  map[uint32]any

	// mime types we advertise as source (guarded by sourceMutex)
	sourceMimeTypes []string
	sourceMutex     sync.RWMutex

	// data retained for clipboard persistence (guarded by persistMutex)
	persistData      map[string][]byte
	persistMimeTypes []string
	persistMutex     sync.RWMutex

	// whether we currently own the selection (guarded by ownerLock)
	isOwner   bool
	ownerLock sync.Mutex

	// lifecycle
	initialized bool
	alive       bool
	stopChan    chan struct{}

	// history storage
	db     *bolt.DB
	dbPath string

	// published state and subscriber fan-out
	state       *State
	stateMutex  sync.RWMutex
	subscribers map[string]chan State
	subMutex    sync.RWMutex
	dirty       chan struct{} // capacity-1 coalescing "state changed" signal
	notifierWg  sync.WaitGroup
	lastState   *State // last snapshot actually delivered (for stateEqual diffing)
}
// GetState returns a snapshot of the current clipboard state under a read
// lock; a nil internal state yields the zero State.
//
// NOTE(review): this is a shallow copy — the History slice backing array
// and the Current pointer are shared with the internal state. Callers
// must treat the returned value as read-only.
func (m *Manager) GetState() State {
	m.stateMutex.RLock()
	defer m.stateMutex.RUnlock()
	if m.state == nil {
		return State{}
	}
	return *m.state
}
// Subscribe registers a subscriber under id and returns a buffered channel
// (capacity 64) on which state snapshots will be delivered. Pair with
// Unsubscribe(id) to release it.
//
// If a subscriber with the same id already exists, its channel is closed
// before being replaced; previously the old channel was silently
// overwritten, leaking it and leaving its receiver blocked forever.
func (m *Manager) Subscribe(id string) chan State {
	ch := make(chan State, 64)
	m.subMutex.Lock()
	if old, ok := m.subscribers[id]; ok {
		close(old)
	}
	m.subscribers[id] = ch
	m.subMutex.Unlock()
	return ch
}
// Unsubscribe removes the subscriber registered under id and closes its
// channel so any range loop on the receiver side terminates. Unknown ids
// are a no-op.
func (m *Manager) Unsubscribe(id string) {
	m.subMutex.Lock()
	defer m.subMutex.Unlock()
	ch, ok := m.subscribers[id]
	if !ok {
		return
	}
	delete(m.subscribers, id)
	close(ch)
}
// notifySubscribers marks the published state dirty without ever
// blocking: dirty has capacity 1, so bursts of changes coalesce into a
// single pending signal that the notifier goroutine drains.
func (m *Manager) notifySubscribers() {
	select {
	case m.dirty <- struct{}{}:
	default:
	}
}
+1 -1
View File
@@ -10,7 +10,7 @@ import (
"github.com/AvengeMedia/DankMaterialShell/core/internal/proto/dwl_ipc" "github.com/AvengeMedia/DankMaterialShell/core/internal/proto/dwl_ipc"
) )
func NewManager(display *wlclient.Display) (*Manager, error) { func NewManager(display wlclient.WaylandDisplay) (*Manager, error) {
m := &Manager{ m := &Manager{
display: display, display: display,
ctx: display.Context(), ctx: display.Context(),
+366
View File
@@ -0,0 +1,366 @@
package dwl
import (
"errors"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
mocks_wlclient "github.com/AvengeMedia/DankMaterialShell/core/internal/mocks/wlclient"
)
// stateChanged(nil, nil) is deliberately true: a nil state always forces
// a refresh. (Note the inverted naming vs. clipboard's stateEqual.)
func TestStateChanged_BothNil(t *testing.T) {
	assert.True(t, stateChanged(nil, nil))
}

// A nil on either side always counts as changed.
func TestStateChanged_OneNil(t *testing.T) {
	s := &State{TagCount: 9}
	assert.True(t, stateChanged(s, nil))
	assert.True(t, stateChanged(nil, s))
}

// Differing TagCount is a change.
func TestStateChanged_TagCountDiffers(t *testing.T) {
	a := &State{TagCount: 9, Outputs: make(map[string]*OutputState), Layouts: []string{}}
	b := &State{TagCount: 10, Outputs: make(map[string]*OutputState), Layouts: []string{}}
	assert.True(t, stateChanged(a, b))
}

// Differing layout-list length is a change.
func TestStateChanged_LayoutLengthDiffers(t *testing.T) {
	a := &State{TagCount: 9, Layouts: []string{"tile"}, Outputs: make(map[string]*OutputState)}
	b := &State{TagCount: 9, Layouts: []string{"tile", "monocle"}, Outputs: make(map[string]*OutputState)}
	assert.True(t, stateChanged(a, b))
}

// Differing active output is a change.
func TestStateChanged_ActiveOutputDiffers(t *testing.T) {
	a := &State{TagCount: 9, ActiveOutput: "eDP-1", Outputs: make(map[string]*OutputState), Layouts: []string{}}
	b := &State{TagCount: 9, ActiveOutput: "HDMI-A-1", Outputs: make(map[string]*OutputState), Layouts: []string{}}
	assert.True(t, stateChanged(a, b))
}

// Differing output count is a change.
func TestStateChanged_OutputCountDiffers(t *testing.T) {
	a := &State{
		TagCount: 9,
		Outputs:  map[string]*OutputState{"eDP-1": {}},
		Layouts:  []string{},
	}
	b := &State{
		TagCount: 9,
		Outputs:  map[string]*OutputState{},
		Layouts:  []string{},
	}
	assert.True(t, stateChanged(a, b))
}

// Each per-output field (Active, Layout, Title) is compared individually;
// the test mutates one field at a time, restoring the previous one.
func TestStateChanged_OutputFieldsDiffer(t *testing.T) {
	a := &State{
		TagCount: 9,
		Layouts:  []string{},
		Outputs: map[string]*OutputState{
			"eDP-1": {Active: 1, Layout: 0, Title: "Firefox"},
		},
	}
	b := &State{
		TagCount: 9,
		Layouts:  []string{},
		Outputs: map[string]*OutputState{
			"eDP-1": {Active: 0, Layout: 0, Title: "Firefox"},
		},
	}
	assert.True(t, stateChanged(a, b))
	b.Outputs["eDP-1"].Active = 1
	b.Outputs["eDP-1"].Layout = 1
	assert.True(t, stateChanged(a, b))
	b.Outputs["eDP-1"].Layout = 0
	b.Outputs["eDP-1"].Title = "Code"
	assert.True(t, stateChanged(a, b))
}

// Per-tag fields (State, Clients) are compared too.
func TestStateChanged_TagsDiffer(t *testing.T) {
	a := &State{
		TagCount: 9,
		Layouts:  []string{},
		Outputs: map[string]*OutputState{
			"eDP-1": {Tags: []TagState{{Tag: 1, State: 1, Clients: 2, Focused: 1}}},
		},
	}
	b := &State{
		TagCount: 9,
		Layouts:  []string{},
		Outputs: map[string]*OutputState{
			"eDP-1": {Tags: []TagState{{Tag: 1, State: 2, Clients: 2, Focused: 1}}},
		},
	}
	assert.True(t, stateChanged(a, b))
	b.Outputs["eDP-1"].Tags[0].State = 1
	b.Outputs["eDP-1"].Tags[0].Clients = 3
	assert.True(t, stateChanged(a, b))
}

// Two fully-populated, field-identical states compare as unchanged.
func TestStateChanged_Equal(t *testing.T) {
	a := &State{
		TagCount:     9,
		ActiveOutput: "eDP-1",
		Layouts:      []string{"tile", "monocle"},
		Outputs: map[string]*OutputState{
			"eDP-1": {
				Name:         "eDP-1",
				Active:       1,
				Layout:       0,
				LayoutSymbol: "[]=",
				Title:        "Firefox",
				AppID:        "firefox",
				KbLayout:     "us",
				Keymode:      "",
				Tags:         []TagState{{Tag: 1, State: 1, Clients: 2, Focused: 1}},
			},
		},
	}
	b := &State{
		TagCount:     9,
		ActiveOutput: "eDP-1",
		Layouts:      []string{"tile", "monocle"},
		Outputs: map[string]*OutputState{
			"eDP-1": {
				Name:         "eDP-1",
				Active:       1,
				Layout:       0,
				LayoutSymbol: "[]=",
				Title:        "Firefox",
				AppID:        "firefox",
				KbLayout:     "us",
				Keymode:      "",
				Tags:         []TagState{{Tag: 1, State: 1, Clients: 2, Focused: 1}},
			},
		},
	}
	assert.False(t, stateChanged(a, b))
}
// TestManager_ConcurrentGetState races GetState readers against writers
// swapping m.state under stateMutex; meaningful under `go test -race`.
func TestManager_ConcurrentGetState(t *testing.T) {
	m := &Manager{
		state: &State{
			TagCount: 9,
			Layouts:  []string{"tile"},
			Outputs:  map[string]*OutputState{"eDP-1": {Name: "eDP-1"}},
		},
	}
	var wg sync.WaitGroup
	const goroutines = 50
	const iterations = 100
	for i := 0; i < goroutines/2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				s := m.GetState()
				_ = s.TagCount
				_ = s.Outputs
			}
		}()
	}
	for i := 0; i < goroutines/2; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				m.stateMutex.Lock()
				m.state = &State{
					TagCount: uint32(j % 10),
					Layouts:  []string{"tile", "monocle"},
					Outputs:  map[string]*OutputState{"eDP-1": {Active: uint32(j % 2)}},
				}
				m.stateMutex.Unlock()
			}
		}(i)
	}
	wg.Wait()
}

// TestManager_ConcurrentSubscriberAccess subscribes/unsubscribes from 20
// goroutines; a -race test for the subscriber map locking.
func TestManager_ConcurrentSubscriberAccess(t *testing.T) {
	m := &Manager{
		stopChan: make(chan struct{}),
		dirty:    make(chan struct{}, 1),
	}
	var wg sync.WaitGroup
	const goroutines = 20
	for i := 0; i < goroutines; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			subID := string(rune('a' + id))
			ch := m.Subscribe(subID)
			assert.NotNil(t, ch)
			time.Sleep(time.Millisecond)
			m.Unsubscribe(subID)
		}(i)
	}
	wg.Wait()
}

// TestManager_SyncmapOutputsConcurrentAccess exercises Store/Load/Range/
// Delete on the typed syncmap of per-output state from 30 goroutines.
func TestManager_SyncmapOutputsConcurrentAccess(t *testing.T) {
	m := &Manager{}
	var wg sync.WaitGroup
	const goroutines = 30
	const iterations = 50
	for i := 0; i < goroutines; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			key := uint32(id)
			for j := 0; j < iterations; j++ {
				state := &outputState{
					id:     key,
					name:   "test-output",
					active: uint32(j % 2),
					tags:   []TagState{{Tag: uint32(j), State: 1}},
				}
				m.outputs.Store(key, state)
				if loaded, ok := m.outputs.Load(key); ok {
					assert.Equal(t, key, loaded.id)
				}
				m.outputs.Range(func(k uint32, v *outputState) bool {
					_ = v.name
					_ = v.active
					return true
				})
			}
			m.outputs.Delete(key)
		}(i)
	}
	wg.Wait()
}

// Repeated notifications coalesce into one pending signal on the
// capacity-1 dirty channel and never block.
func TestManager_NotifySubscribersNonBlocking(t *testing.T) {
	m := &Manager{
		dirty: make(chan struct{}, 1),
	}
	for i := 0; i < 10; i++ {
		m.notifySubscribers()
	}
	assert.Len(t, m.dirty, 1)
}

// post drops silently once the command queue is full: 4 posts into a
// capacity-2 queue leave exactly 2 queued.
func TestManager_PostQueueFull(t *testing.T) {
	m := &Manager{
		cmdq:     make(chan cmd, 2),
		stopChan: make(chan struct{}),
	}
	m.post(func() {})
	m.post(func() {})
	m.post(func() {})
	m.post(func() {})
	assert.Len(t, m.cmdq, 2)
}

// GetState on a zero-value Manager returns a usable empty snapshot
// (non-nil Outputs map and Layouts slice), not nil fields.
func TestManager_GetStateNilState(t *testing.T) {
	m := &Manager{}
	s := m.GetState()
	assert.NotNil(t, s.Outputs)
	assert.NotNil(t, s.Layouts)
	assert.Equal(t, uint32(0), s.TagCount)
}
// TestTagState_Fields is a plain field round-trip for the TagState struct.
func TestTagState_Fields(t *testing.T) {
	tag := TagState{
		Tag:     1,
		State:   2,
		Clients: 3,
		Focused: 1,
	}
	assert.Equal(t, uint32(1), tag.Tag)
	assert.Equal(t, uint32(2), tag.State)
	assert.Equal(t, uint32(3), tag.Clients)
	assert.Equal(t, uint32(1), tag.Focused)
}

// TestOutputState_Fields is a plain field round-trip for OutputState.
func TestOutputState_Fields(t *testing.T) {
	out := OutputState{
		Name:         "eDP-1",
		Active:       1,
		Tags:         []TagState{{Tag: 1}},
		Layout:       0,
		LayoutSymbol: "[]=",
		Title:        "Firefox",
		AppID:        "firefox",
		KbLayout:     "us",
		Keymode:      "",
	}
	assert.Equal(t, "eDP-1", out.Name)
	assert.Equal(t, uint32(1), out.Active)
	assert.Len(t, out.Tags, 1)
	assert.Equal(t, "[]=", out.LayoutSymbol)
}

// A newly-appearing output counts as a change.
func TestStateChanged_NewOutputAppears(t *testing.T) {
	a := &State{
		TagCount: 9,
		Layouts:  []string{},
		Outputs: map[string]*OutputState{
			"eDP-1": {Name: "eDP-1"},
		},
	}
	b := &State{
		TagCount: 9,
		Layouts:  []string{},
		Outputs: map[string]*OutputState{
			"eDP-1":    {Name: "eDP-1"},
			"HDMI-A-1": {Name: "HDMI-A-1"},
		},
	}
	assert.True(t, stateChanged(a, b))
}

// Differing per-output tag-list length counts as a change.
func TestStateChanged_TagsLengthDiffers(t *testing.T) {
	a := &State{
		TagCount: 9,
		Layouts:  []string{},
		Outputs: map[string]*OutputState{
			"eDP-1": {Tags: []TagState{{Tag: 1}}},
		},
	}
	b := &State{
		TagCount: 9,
		Layouts:  []string{},
		Outputs: map[string]*OutputState{
			"eDP-1": {Tags: []TagState{{Tag: 1}, {Tag: 2}}},
		},
	}
	assert.True(t, stateChanged(a, b))
}

// NewManager must surface a GetRegistry failure from the (mocked)
// display instead of swallowing it.
func TestNewManager_GetRegistryError(t *testing.T) {
	mockDisplay := mocks_wlclient.NewMockWaylandDisplay(t)
	mockDisplay.EXPECT().Context().Return(nil)
	mockDisplay.EXPECT().GetRegistry().Return(nil, errors.New("failed to get registry"))
	_, err := NewManager(mockDisplay)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "failed to get registry")
}
+1 -1
View File
@@ -38,7 +38,7 @@ type cmd struct {
} }
type Manager struct { type Manager struct {
display *wlclient.Display display wlclient.WaylandDisplay
ctx *wlclient.Context ctx *wlclient.Context
registry *wlclient.Registry registry *wlclient.Registry
manager any manager any
+1 -1
View File
@@ -38,7 +38,7 @@ func CheckCapability() bool {
return found return found
} }
func NewManager(display *wlclient.Display) (*Manager, error) { func NewManager(display wlclient.WaylandDisplay) (*Manager, error) {
m := &Manager{ m := &Manager{
display: display, display: display,
ctx: display.Context(), ctx: display.Context(),
@@ -0,0 +1,392 @@
package extworkspace
import (
"errors"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
mocks_wlclient "github.com/AvengeMedia/DankMaterialShell/core/internal/mocks/wlclient"
)
// stateChanged(nil, nil) is deliberately true: nil always forces a refresh.
func TestStateChanged_BothNil(t *testing.T) {
	assert.True(t, stateChanged(nil, nil))
}

// A nil on either side always counts as changed.
func TestStateChanged_OneNil(t *testing.T) {
	s := &State{Groups: []*WorkspaceGroup{}}
	assert.True(t, stateChanged(s, nil))
	assert.True(t, stateChanged(nil, s))
}

// Differing group count is a change.
func TestStateChanged_GroupCountDiffers(t *testing.T) {
	a := &State{Groups: []*WorkspaceGroup{{ID: "group-1"}}}
	b := &State{Groups: []*WorkspaceGroup{}}
	assert.True(t, stateChanged(a, b))
}

// Differing group ID is a change.
func TestStateChanged_GroupIDDiffers(t *testing.T) {
	a := &State{Groups: []*WorkspaceGroup{{ID: "group-1", Outputs: []string{}, Workspaces: []*Workspace{}}}}
	b := &State{Groups: []*WorkspaceGroup{{ID: "group-2", Outputs: []string{}, Workspaces: []*Workspace{}}}}
	assert.True(t, stateChanged(a, b))
}

// Differing output count within a group is a change.
func TestStateChanged_OutputCountDiffers(t *testing.T) {
	a := &State{Groups: []*WorkspaceGroup{{ID: "group-1", Outputs: []string{"eDP-1"}, Workspaces: []*Workspace{}}}}
	b := &State{Groups: []*WorkspaceGroup{{ID: "group-1", Outputs: []string{}, Workspaces: []*Workspace{}}}}
	assert.True(t, stateChanged(a, b))
}

// Differing output name within a group is a change.
func TestStateChanged_OutputNameDiffers(t *testing.T) {
	a := &State{Groups: []*WorkspaceGroup{{ID: "group-1", Outputs: []string{"eDP-1"}, Workspaces: []*Workspace{}}}}
	b := &State{Groups: []*WorkspaceGroup{{ID: "group-1", Outputs: []string{"HDMI-A-1"}, Workspaces: []*Workspace{}}}}
	assert.True(t, stateChanged(a, b))
}

// Differing workspace count within a group is a change.
func TestStateChanged_WorkspaceCountDiffers(t *testing.T) {
	a := &State{Groups: []*WorkspaceGroup{{
		ID:         "group-1",
		Outputs:    []string{},
		Workspaces: []*Workspace{{ID: "1", Name: "ws1"}},
	}}}
	b := &State{Groups: []*WorkspaceGroup{{
		ID:         "group-1",
		Outputs:    []string{},
		Workspaces: []*Workspace{},
	}}}
	assert.True(t, stateChanged(a, b))
}

// Every per-workspace field (ID, Name, State, Active, Urgent, Hidden) is
// compared; the test flips one field at a time, restoring the previous.
func TestStateChanged_WorkspaceFieldsDiffer(t *testing.T) {
	a := &State{Groups: []*WorkspaceGroup{{
		ID:      "group-1",
		Outputs: []string{},
		Workspaces: []*Workspace{{
			ID: "1", Name: "ws1", State: 0, Active: false, Urgent: false, Hidden: false,
		}},
	}}}
	b := &State{Groups: []*WorkspaceGroup{{
		ID:      "group-1",
		Outputs: []string{},
		Workspaces: []*Workspace{{
			ID: "2", Name: "ws1", State: 0, Active: false, Urgent: false, Hidden: false,
		}},
	}}}
	assert.True(t, stateChanged(a, b))
	b.Groups[0].Workspaces[0].ID = "1"
	b.Groups[0].Workspaces[0].Name = "ws2"
	assert.True(t, stateChanged(a, b))
	b.Groups[0].Workspaces[0].Name = "ws1"
	b.Groups[0].Workspaces[0].State = 1
	assert.True(t, stateChanged(a, b))
	b.Groups[0].Workspaces[0].State = 0
	b.Groups[0].Workspaces[0].Active = true
	assert.True(t, stateChanged(a, b))
	b.Groups[0].Workspaces[0].Active = false
	b.Groups[0].Workspaces[0].Urgent = true
	assert.True(t, stateChanged(a, b))
	b.Groups[0].Workspaces[0].Urgent = false
	b.Groups[0].Workspaces[0].Hidden = true
	assert.True(t, stateChanged(a, b))
}

// Coordinate value and coordinate-list length differences are changes.
func TestStateChanged_WorkspaceCoordinatesDiffer(t *testing.T) {
	a := &State{Groups: []*WorkspaceGroup{{
		ID:      "group-1",
		Outputs: []string{},
		Workspaces: []*Workspace{{
			ID: "1", Name: "ws1", Coordinates: []uint32{0, 0},
		}},
	}}}
	b := &State{Groups: []*WorkspaceGroup{{
		ID:      "group-1",
		Outputs: []string{},
		Workspaces: []*Workspace{{
			ID: "1", Name: "ws1", Coordinates: []uint32{1, 0},
		}},
	}}}
	assert.True(t, stateChanged(a, b))
	b.Groups[0].Workspaces[0].Coordinates = []uint32{0}
	assert.True(t, stateChanged(a, b))
}

// Two fully-populated, field-identical states compare as unchanged.
func TestStateChanged_Equal(t *testing.T) {
	a := &State{Groups: []*WorkspaceGroup{{
		ID:      "group-1",
		Outputs: []string{"eDP-1", "HDMI-A-1"},
		Workspaces: []*Workspace{
			{ID: "1", Name: "ws1", Coordinates: []uint32{0, 0}, State: 1, Active: true},
			{ID: "2", Name: "ws2", Coordinates: []uint32{1, 0}, State: 0, Active: false},
		},
	}}}
	b := &State{Groups: []*WorkspaceGroup{{
		ID:      "group-1",
		Outputs: []string{"eDP-1", "HDMI-A-1"},
		Workspaces: []*Workspace{
			{ID: "1", Name: "ws1", Coordinates: []uint32{0, 0}, State: 1, Active: true},
			{ID: "2", Name: "ws2", Coordinates: []uint32{1, 0}, State: 0, Active: false},
		},
	}}}
	assert.False(t, stateChanged(a, b))
}
// TestManager_ConcurrentGetState races GetState readers against writers
// swapping m.state under stateMutex; meaningful under `go test -race`.
func TestManager_ConcurrentGetState(t *testing.T) {
	m := &Manager{
		state: &State{
			Groups: []*WorkspaceGroup{{ID: "group-1", Outputs: []string{"eDP-1"}}},
		},
	}
	var wg sync.WaitGroup
	const goroutines = 50
	const iterations = 100
	for i := 0; i < goroutines/2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				s := m.GetState()
				_ = s.Groups
			}
		}()
	}
	for i := 0; i < goroutines/2; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				m.stateMutex.Lock()
				m.state = &State{
					Groups: []*WorkspaceGroup{{ID: "group-1", Outputs: []string{"eDP-1"}}},
				}
				m.stateMutex.Unlock()
			}
		}(i)
	}
	wg.Wait()
}

// TestManager_ConcurrentSubscriberAccess subscribes/unsubscribes from 20
// goroutines; a -race test for the subscriber map locking.
func TestManager_ConcurrentSubscriberAccess(t *testing.T) {
	m := &Manager{
		stopChan: make(chan struct{}),
		dirty:    make(chan struct{}, 1),
	}
	var wg sync.WaitGroup
	const goroutines = 20
	for i := 0; i < goroutines; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			subID := string(rune('a' + id))
			ch := m.Subscribe(subID)
			assert.NotNil(t, ch)
			time.Sleep(time.Millisecond)
			m.Unsubscribe(subID)
		}(i)
	}
	wg.Wait()
}

// Store/Load/Range/Delete on the workspace-group syncmap from 30
// goroutines; -race test.
func TestManager_SyncmapGroupsConcurrentAccess(t *testing.T) {
	m := &Manager{}
	var wg sync.WaitGroup
	const goroutines = 30
	const iterations = 50
	for i := 0; i < goroutines; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			key := uint32(id)
			for j := 0; j < iterations; j++ {
				state := &workspaceGroupState{
					id:           key,
					outputIDs:    map[uint32]bool{1: true},
					workspaceIDs: []uint32{uint32(j)},
				}
				m.groups.Store(key, state)
				if loaded, ok := m.groups.Load(key); ok {
					assert.Equal(t, key, loaded.id)
				}
				m.groups.Range(func(k uint32, v *workspaceGroupState) bool {
					_ = v.id
					_ = v.outputIDs
					return true
				})
			}
			m.groups.Delete(key)
		}(i)
	}
	wg.Wait()
}

// Same pattern for the per-workspace syncmap.
func TestManager_SyncmapWorkspacesConcurrentAccess(t *testing.T) {
	m := &Manager{}
	var wg sync.WaitGroup
	const goroutines = 30
	const iterations = 50
	for i := 0; i < goroutines; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			key := uint32(id)
			for j := 0; j < iterations; j++ {
				state := &workspaceState{
					id:          key,
					workspaceID: "ws-1",
					name:        "workspace",
					state:       uint32(j % 4),
					coordinates: []uint32{uint32(j), 0},
				}
				m.workspaces.Store(key, state)
				if loaded, ok := m.workspaces.Load(key); ok {
					assert.Equal(t, key, loaded.id)
				}
				m.workspaces.Range(func(k uint32, v *workspaceState) bool {
					_ = v.name
					_ = v.state
					return true
				})
			}
			m.workspaces.Delete(key)
		}(i)
	}
	wg.Wait()
}

// Same pattern for the output-name syncmap.
func TestManager_SyncmapOutputNamesConcurrentAccess(t *testing.T) {
	m := &Manager{}
	var wg sync.WaitGroup
	const goroutines = 30
	const iterations = 50
	for i := 0; i < goroutines; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			key := uint32(id)
			for j := 0; j < iterations; j++ {
				m.outputNames.Store(key, "eDP-1")
				if loaded, ok := m.outputNames.Load(key); ok {
					assert.NotEmpty(t, loaded)
				}
				m.outputNames.Range(func(k uint32, v string) bool {
					_ = v
					return true
				})
			}
			m.outputNames.Delete(key)
		}(i)
	}
	wg.Wait()
}
// Repeated notifications coalesce into one pending signal on the
// capacity-1 dirty channel and never block.
func TestManager_NotifySubscribersNonBlocking(t *testing.T) {
	m := &Manager{
		dirty: make(chan struct{}, 1),
	}
	for i := 0; i < 10; i++ {
		m.notifySubscribers()
	}
	assert.Len(t, m.dirty, 1)
}

// post drops silently once the command queue is full: 4 posts into a
// capacity-2 queue leave exactly 2 queued.
func TestManager_PostQueueFull(t *testing.T) {
	m := &Manager{
		cmdq:     make(chan cmd, 2),
		stopChan: make(chan struct{}),
	}
	m.post(func() {})
	m.post(func() {})
	m.post(func() {})
	m.post(func() {})
	assert.Len(t, m.cmdq, 2)
}

// GetState on a zero-value Manager returns a usable empty snapshot with a
// non-nil (empty) Groups slice.
func TestManager_GetStateNilState(t *testing.T) {
	m := &Manager{}
	s := m.GetState()
	assert.NotNil(t, s.Groups)
	assert.Empty(t, s.Groups)
}

// Plain field round-trip for Workspace.
func TestWorkspace_Fields(t *testing.T) {
	ws := Workspace{
		ID:          "ws-1",
		Name:        "workspace 1",
		Coordinates: []uint32{0, 0},
		State:       1,
		Active:      true,
		Urgent:      false,
		Hidden:      false,
	}
	assert.Equal(t, "ws-1", ws.ID)
	assert.Equal(t, "workspace 1", ws.Name)
	assert.True(t, ws.Active)
	assert.False(t, ws.Urgent)
	assert.False(t, ws.Hidden)
}

// Plain field round-trip for WorkspaceGroup.
func TestWorkspaceGroup_Fields(t *testing.T) {
	group := WorkspaceGroup{
		ID:      "group-1",
		Outputs: []string{"eDP-1", "HDMI-A-1"},
		Workspaces: []*Workspace{
			{ID: "ws-1", Name: "workspace 1"},
		},
	}
	assert.Equal(t, "group-1", group.ID)
	assert.Len(t, group.Outputs, 2)
	assert.Len(t, group.Workspaces, 1)
}

// NewManager must surface a GetRegistry failure from the (mocked)
// display instead of swallowing it.
func TestNewManager_GetRegistryError(t *testing.T) {
	mockDisplay := mocks_wlclient.NewMockWaylandDisplay(t)
	mockDisplay.EXPECT().Context().Return(nil)
	mockDisplay.EXPECT().GetRegistry().Return(nil, errors.New("failed to get registry"))
	_, err := NewManager(mockDisplay)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "failed to get registry")
}
+1 -1
View File
@@ -33,7 +33,7 @@ type cmd struct {
} }
type Manager struct { type Manager struct {
display *wlclient.Display display wlclient.WaylandDisplay
ctx *wlclient.Context ctx *wlclient.Context
registry *wlclient.Registry registry *wlclient.Registry
manager *ext_workspace.ExtWorkspaceManagerV1 manager *ext_workspace.ExtWorkspaceManagerV1
+1
View File
@@ -42,6 +42,7 @@ func handleMatugenQueue(conn net.Conn, req models.Request) {
StockColors: getString("stockColors"), StockColors: getString("stockColors"),
SyncModeWithPortal: getBool("syncModeWithPortal", false), SyncModeWithPortal: getBool("syncModeWithPortal", false),
TerminalsAlwaysDark: getBool("terminalsAlwaysDark", false), TerminalsAlwaysDark: getBool("terminalsAlwaysDark", false),
SkipTemplates: getString("skipTemplates"),
} }
wait := getBool("wait", true) wait := getBool("wait", true)
+10
View File
@@ -8,6 +8,7 @@ import (
"github.com/AvengeMedia/DankMaterialShell/core/internal/server/apppicker" "github.com/AvengeMedia/DankMaterialShell/core/internal/server/apppicker"
"github.com/AvengeMedia/DankMaterialShell/core/internal/server/bluez" "github.com/AvengeMedia/DankMaterialShell/core/internal/server/bluez"
"github.com/AvengeMedia/DankMaterialShell/core/internal/server/brightness" "github.com/AvengeMedia/DankMaterialShell/core/internal/server/brightness"
"github.com/AvengeMedia/DankMaterialShell/core/internal/server/clipboard"
"github.com/AvengeMedia/DankMaterialShell/core/internal/server/cups" "github.com/AvengeMedia/DankMaterialShell/core/internal/server/cups"
"github.com/AvengeMedia/DankMaterialShell/core/internal/server/dwl" "github.com/AvengeMedia/DankMaterialShell/core/internal/server/dwl"
"github.com/AvengeMedia/DankMaterialShell/core/internal/server/evdev" "github.com/AvengeMedia/DankMaterialShell/core/internal/server/evdev"
@@ -147,6 +148,15 @@ func RouteRequest(conn net.Conn, req models.Request) {
return return
} }
if strings.HasPrefix(req.Method, "clipboard.") {
if clipboardManager == nil {
models.RespondError(conn, req.ID, "clipboard manager not initialized")
return
}
clipboard.HandleRequest(conn, req, clipboardManager)
return
}
switch req.Method { switch req.Method {
case "ping": case "ping":
models.Respond(conn, req.ID, "pong") models.Respond(conn, req.ID, "pong")
+92 -5
View File
@@ -18,6 +18,7 @@ import (
"github.com/AvengeMedia/DankMaterialShell/core/internal/server/apppicker" "github.com/AvengeMedia/DankMaterialShell/core/internal/server/apppicker"
"github.com/AvengeMedia/DankMaterialShell/core/internal/server/bluez" "github.com/AvengeMedia/DankMaterialShell/core/internal/server/bluez"
"github.com/AvengeMedia/DankMaterialShell/core/internal/server/brightness" "github.com/AvengeMedia/DankMaterialShell/core/internal/server/brightness"
"github.com/AvengeMedia/DankMaterialShell/core/internal/server/clipboard"
"github.com/AvengeMedia/DankMaterialShell/core/internal/server/cups" "github.com/AvengeMedia/DankMaterialShell/core/internal/server/cups"
"github.com/AvengeMedia/DankMaterialShell/core/internal/server/dwl" "github.com/AvengeMedia/DankMaterialShell/core/internal/server/dwl"
"github.com/AvengeMedia/DankMaterialShell/core/internal/server/evdev" "github.com/AvengeMedia/DankMaterialShell/core/internal/server/evdev"
@@ -32,7 +33,7 @@ import (
"github.com/AvengeMedia/DankMaterialShell/core/pkg/syncmap" "github.com/AvengeMedia/DankMaterialShell/core/pkg/syncmap"
) )
const APIVersion = 22 const APIVersion = 23
var CLIVersion = "dev" var CLIVersion = "dev"
@@ -63,6 +64,7 @@ var extWorkspaceManager *extworkspace.Manager
var brightnessManager *brightness.Manager var brightnessManager *brightness.Manager
var wlrOutputManager *wlroutput.Manager var wlrOutputManager *wlroutput.Manager
var evdevManager *evdev.Manager var evdevManager *evdev.Manager
var clipboardManager *clipboard.Manager
var wlContext *wlcontext.SharedContext var wlContext *wlcontext.SharedContext
var capabilitySubscribers syncmap.Map[string, chan ServerInfo] var capabilitySubscribers syncmap.Map[string, chan ServerInfo]
@@ -336,6 +338,31 @@ func InitializeEvdevManager() error {
return nil return nil
} }
// InitializeClipboardManager creates the package-level clipboard manager,
// lazily constructing the shared Wayland context if no earlier manager
// initialization has done so. Returns a non-nil error (already logged) when
// either the context or the manager cannot be created; on success the
// "clipboard" capability becomes visible via getCapabilities/getServerInfo.
// NOTE(review): this assigns the package-level clipboardManager and is invoked
// from a goroutine during Start — confirm there is no concurrent reader racing
// with this write during startup.
func InitializeClipboardManager() error {
	log.Info("Attempting to initialize clipboard manager...")

	// Reuse the shared Wayland context when another subsystem already set it up.
	if wlContext == nil {
		ctx, err := wlcontext.New()
		if err != nil {
			log.Errorf("Failed to create shared Wayland context: %v", err)
			return err
		}
		wlContext = ctx
	}

	config := clipboard.LoadConfig()
	manager, err := clipboard.NewManager(wlContext, config)
	if err != nil {
		log.Errorf("Failed to initialize clipboard manager: %v", err)
		return err
	}

	clipboardManager = manager
	log.Info("Clipboard manager initialized successfully")
	return nil
}
func handleConnection(conn net.Conn) { func handleConnection(conn net.Conn) {
defer conn.Close() defer conn.Close()
@@ -409,6 +436,10 @@ func getCapabilities() Capabilities {
caps = append(caps, "evdev") caps = append(caps, "evdev")
} }
if clipboardManager != nil {
caps = append(caps, "clipboard")
}
return Capabilities{Capabilities: caps} return Capabilities{Capabilities: caps}
} }
@@ -463,6 +494,10 @@ func getServerInfo() ServerInfo {
caps = append(caps, "evdev") caps = append(caps, "evdev")
} }
if clipboardManager != nil {
caps = append(caps, "clipboard")
}
return ServerInfo{ return ServerInfo{
APIVersion: APIVersion, APIVersion: APIVersion,
CLIVersion: CLIVersion, CLIVersion: CLIVersion,
@@ -1034,6 +1069,38 @@ func handleSubscribe(conn net.Conn, req models.Request) {
}() }()
} }
if shouldSubscribe("clipboard") && clipboardManager != nil {
wg.Add(1)
clipboardChan := clipboardManager.Subscribe(clientID + "-clipboard")
go func() {
defer wg.Done()
defer clipboardManager.Unsubscribe(clientID + "-clipboard")
initialState := clipboardManager.GetState()
select {
case eventChan <- ServiceEvent{Service: "clipboard", Data: initialState}:
case <-stopChan:
return
}
for {
select {
case state, ok := <-clipboardChan:
if !ok {
return
}
select {
case eventChan <- ServiceEvent{Service: "clipboard", Data: state}:
case <-stopChan:
return
}
case <-stopChan:
return
}
}
}()
}
go func() { go func() {
wg.Wait() wg.Wait()
close(eventChan) close(eventChan)
@@ -1096,6 +1163,9 @@ func cleanupManagers() {
if evdevManager != nil { if evdevManager != nil {
evdevManager.Close() evdevManager.Close()
} }
if clipboardManager != nil {
clipboardManager.Close()
}
if wlContext != nil { if wlContext != nil {
wlContext.Close() wlContext.Close()
} }
@@ -1259,6 +1329,18 @@ func Start(printDocs bool) error {
log.Info("Evdev:") log.Info("Evdev:")
log.Info(" evdev.getState - Get current evdev state (caps lock)") log.Info(" evdev.getState - Get current evdev state (caps lock)")
log.Info(" evdev.subscribe - Subscribe to evdev state changes (streaming)") log.Info(" evdev.subscribe - Subscribe to evdev state changes (streaming)")
log.Info("Clipboard:")
log.Info(" clipboard.getState - Get clipboard state (enabled, history, current)")
log.Info(" clipboard.getHistory - Get clipboard history with previews")
log.Info(" clipboard.getEntry - Get full entry by ID (params: id)")
log.Info(" clipboard.deleteEntry - Delete entry by ID (params: id)")
log.Info(" clipboard.clearHistory - Clear all clipboard history")
log.Info(" clipboard.copy - Copy text to clipboard (params: text)")
log.Info(" clipboard.paste - Get current clipboard text")
log.Info(" clipboard.search - Search history (params: query?, mimeType?, isImage?, limit?, offset?, before?, after?)")
log.Info(" clipboard.getConfig - Get clipboard configuration")
log.Info(" clipboard.setConfig - Set configuration (params: maxHistory?, maxEntrySize?, autoClearDays?, clearAtStartup?)")
log.Info(" clipboard.subscribe - Subscribe to clipboard state changes (streaming)")
log.Info("") log.Info("")
} }
log.Info("Initializing managers...") log.Info("Initializing managers...")
@@ -1366,10 +1448,15 @@ func Start(printDocs bool) error {
} }
}() }()
if wlContext != nil { go func() {
wlContext.Start() if err := InitializeClipboardManager(); err != nil {
log.Info("Wayland event dispatcher started") log.Warnf("Clipboard manager unavailable: %v", err)
} }
if wlContext != nil {
wlContext.Start()
log.Info("Wayland event dispatcher started")
}
}()
log.Info("") log.Info("")
log.Infof("Ready! Capabilities: %v", getCapabilities().Capabilities) log.Infof("Ready! Capabilities: %v", getCapabilities().Capabilities)
+36 -21
View File
@@ -19,7 +19,7 @@ import (
const animKelvinStep = 25 const animKelvinStep = 25
func NewManager(display *wlclient.Display, config Config) (*Manager, error) { func NewManager(display wlclient.WaylandDisplay, config Config) (*Manager, error) {
if err := config.Validate(); err != nil { if err := config.Validate(); err != nil {
return nil, err return nil, err
} }
@@ -268,31 +268,36 @@ func (m *Manager) setupOutputControls(outputs []*wlclient.Output, manager *wlr_g
} }
func (m *Manager) setupControlHandlers(state *outputState, control *wlr_gamma_control.ZwlrGammaControlV1) { func (m *Manager) setupControlHandlers(state *outputState, control *wlr_gamma_control.ZwlrGammaControlV1) {
outputID := state.id
control.SetGammaSizeHandler(func(e wlr_gamma_control.ZwlrGammaControlV1GammaSizeEvent) { control.SetGammaSizeHandler(func(e wlr_gamma_control.ZwlrGammaControlV1GammaSizeEvent) {
if out, ok := m.outputs.Load(state.id); ok { size := e.Size
out.rampSize = e.Size
out.failed = false
out.retryCount = 0
}
m.post(func() { m.post(func() {
if out, ok := m.outputs.Load(outputID); ok {
out.rampSize = size
out.failed = false
out.retryCount = 0
}
m.applyCurrentTemp() m.applyCurrentTemp()
}) })
}) })
control.SetFailedHandler(func(_ wlr_gamma_control.ZwlrGammaControlV1FailedEvent) { control.SetFailedHandler(func(_ wlr_gamma_control.ZwlrGammaControlV1FailedEvent) {
out, ok := m.outputs.Load(state.id) m.post(func() {
if !ok { out, ok := m.outputs.Load(outputID)
return if !ok {
} return
out.failed = true }
out.rampSize = 0 out.failed = true
out.retryCount++ out.rampSize = 0
out.lastFailTime = time.Now() out.retryCount++
out.lastFailTime = time.Now()
backoff := time.Duration(300<<uint(min(out.retryCount-1, 4))) * time.Millisecond backoff := time.Duration(300<<uint(min(out.retryCount-1, 4))) * time.Millisecond
time.AfterFunc(backoff, func() { time.AfterFunc(backoff, func() {
m.post(func() { m.post(func() {
m.recreateOutputControl(out) m.recreateOutputControl(out)
})
}) })
}) })
}) })
@@ -583,7 +588,7 @@ func (m *Manager) schedulerLoop() {
m.configMutex.RUnlock() m.configMutex.RUnlock()
if enabled { if enabled {
m.applyCurrentTemp() m.post(func() { m.applyCurrentTemp() })
} }
var timer *time.Timer var timer *time.Timer
@@ -625,14 +630,14 @@ func (m *Manager) schedulerLoop() {
enabled := m.config.Enabled enabled := m.config.Enabled
m.configMutex.RUnlock() m.configMutex.RUnlock()
if enabled { if enabled {
m.applyCurrentTemp() m.post(func() { m.applyCurrentTemp() })
} }
case <-timer.C: case <-timer.C:
m.configMutex.RLock() m.configMutex.RLock()
enabled := m.config.Enabled enabled := m.config.Enabled
m.configMutex.RUnlock() m.configMutex.RUnlock()
if enabled { if enabled {
m.applyCurrentTemp() m.post(func() { m.applyCurrentTemp() })
} }
} }
} }
@@ -643,6 +648,16 @@ func (m *Manager) applyCurrentTemp() {
return return
} }
m.configMutex.RLock()
low, high := m.config.LowTemp, m.config.HighTemp
m.configMutex.RUnlock()
if low == high {
m.applyGamma(low)
m.updateStateFromSchedule()
return
}
if !m.hasValidSchedule() { if !m.hasValidSchedule() {
m.updateStateFromSchedule() m.updateStateFromSchedule()
return return
@@ -0,0 +1,414 @@
package wayland
import (
"errors"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
mocks_wlclient "github.com/AvengeMedia/DankMaterialShell/core/internal/mocks/wlclient"
)
// TestManager_ActorSerializesOutputStateAccess verifies that funnelling all
// outputState mutations through the manager's actor goroutine (via post) is
// safe under heavy concurrent posting. Run with -race for this to be meaningful.
func TestManager_ActorSerializesOutputStateAccess(t *testing.T) {
	m := &Manager{
		cmdq:     make(chan cmd, 128),
		stopChan: make(chan struct{}),
	}
	m.wg.Add(1)
	go m.waylandActor()

	state := &outputState{
		id:           1,
		registryName: 100,
		rampSize:     256,
	}
	m.outputs.Store(state.id, state)

	var wg sync.WaitGroup
	const goroutines = 50
	const iterations = 100

	for i := 0; i < goroutines; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				// All mutations happen inside posted closures, so only the
				// actor goroutine ever touches the outputState fields.
				m.post(func() {
					if out, ok := m.outputs.Load(state.id); ok {
						out.rampSize = uint32(j)
						out.failed = j%2 == 0
						out.retryCount = j
						out.lastFailTime = time.Now()
					}
				})
			}
		}(i)
	}
	wg.Wait()

	// Barrier: once this posted closure runs, all earlier commands have been
	// consumed. NOTE(review): assumes this final post is not dropped by a full
	// cmdq — the actor should have drained the queue by now; confirm.
	done := make(chan struct{})
	m.post(func() { close(done) })
	<-done

	close(m.stopChan)
	m.wg.Wait()
}
// TestManager_ConcurrentSubscriberAccess exercises Subscribe/Unsubscribe from
// many goroutines at once; the test passes if no race or panic occurs.
func TestManager_ConcurrentSubscriberAccess(t *testing.T) {
	m := &Manager{
		stopChan:      make(chan struct{}),
		dirty:         make(chan struct{}, 1),
		updateTrigger: make(chan struct{}, 1),
	}

	var wg sync.WaitGroup
	const goroutines = 20

	for i := 0; i < goroutines; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			// Unique one-letter subscriber ID per goroutine (id < 26).
			subID := string(rune('a' + id))
			ch := m.Subscribe(subID)
			assert.NotNil(t, ch)
			time.Sleep(time.Millisecond)
			m.Unsubscribe(subID)
		}(i)
	}
	wg.Wait()
}
// TestManager_ConcurrentGetState runs GetState readers concurrently with
// writers that replace m.state under stateMutex, checking for races.
func TestManager_ConcurrentGetState(t *testing.T) {
	m := &Manager{
		state: &State{
			CurrentTemp: 5000,
			IsDay:       true,
		},
	}

	var wg sync.WaitGroup
	const goroutines = 50
	const iterations = 100

	// Half the goroutines read via the public accessor...
	for i := 0; i < goroutines/2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				s := m.GetState()
				assert.GreaterOrEqual(t, s.CurrentTemp, 0)
			}
		}()
	}

	// ...the other half swap in fresh State values under the lock.
	for i := 0; i < goroutines/2; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				m.stateMutex.Lock()
				m.state = &State{
					CurrentTemp: 4000 + i*100,
					IsDay:       j%2 == 0,
				}
				m.stateMutex.Unlock()
			}
		}(i)
	}
	wg.Wait()
}
// TestManager_ConcurrentConfigAccess pits RLock readers of m.config against
// Lock writers mutating the same fields, validating configMutex coverage.
func TestManager_ConcurrentConfigAccess(t *testing.T) {
	m := &Manager{
		config: DefaultConfig(),
	}

	var wg sync.WaitGroup
	const goroutines = 30
	const iterations = 100

	for i := 0; i < goroutines/2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				m.configMutex.RLock()
				_ = m.config.LowTemp
				_ = m.config.HighTemp
				_ = m.config.Enabled
				m.configMutex.RUnlock()
			}
		}()
	}

	for i := 0; i < goroutines/2; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				m.configMutex.Lock()
				m.config.LowTemp = 3000 + j
				m.config.HighTemp = 7000 - j
				m.config.Enabled = j%2 == 0
				m.configMutex.Unlock()
			}
		}(i)
	}
	wg.Wait()
}
// TestManager_SyncmapOutputsConcurrentAccess hammers the outputs syncmap with
// concurrent Store/Load/Range/Delete, one key per goroutine.
func TestManager_SyncmapOutputsConcurrentAccess(t *testing.T) {
	m := &Manager{}

	var wg sync.WaitGroup
	const goroutines = 30
	const iterations = 50

	for i := 0; i < goroutines; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			key := uint32(id)
			for j := 0; j < iterations; j++ {
				state := &outputState{
					id:       key,
					rampSize: uint32(j),
					failed:   j%2 == 0,
				}
				m.outputs.Store(key, state)
				if loaded, ok := m.outputs.Load(key); ok {
					assert.Equal(t, key, loaded.id)
				}
				// Range also walks entries owned by sibling goroutines.
				m.outputs.Range(func(k uint32, v *outputState) bool {
					_ = v.rampSize
					_ = v.failed
					return true
				})
			}
			m.outputs.Delete(key)
		}(i)
	}
	wg.Wait()
}
// TestManager_LocationCacheConcurrentAccess checks that the cached IP-derived
// latitude/longitude pointers are always accessed under locationMutex.
func TestManager_LocationCacheConcurrentAccess(t *testing.T) {
	m := &Manager{}

	var wg sync.WaitGroup
	const goroutines = 20
	const iterations = 100

	for i := 0; i < goroutines/2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				m.locationMutex.RLock()
				_ = m.cachedIPLat
				_ = m.cachedIPLon
				m.locationMutex.RUnlock()
			}
		}()
	}

	for i := 0; i < goroutines/2; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				// Fresh locals each iteration so each write publishes
				// distinct pointers.
				lat := float64(40 + i)
				lon := float64(-74 + j)
				m.locationMutex.Lock()
				m.cachedIPLat = &lat
				m.cachedIPLon = &lon
				m.locationMutex.Unlock()
			}
		}(i)
	}
	wg.Wait()
}
// TestManager_ScheduleConcurrentAccess runs readers and writers of the sun
// schedule (times + condition) concurrently under scheduleMutex.
func TestManager_ScheduleConcurrentAccess(t *testing.T) {
	now := time.Now()
	m := &Manager{
		schedule: sunSchedule{
			times: SunTimes{
				Dawn:    now,
				Sunrise: now.Add(time.Hour),
				Sunset:  now.Add(12 * time.Hour),
				Night:   now.Add(13 * time.Hour),
			},
		},
	}

	var wg sync.WaitGroup
	const goroutines = 20
	const iterations = 100

	for i := 0; i < goroutines/2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				m.scheduleMutex.RLock()
				_ = m.schedule.times.Dawn
				_ = m.schedule.times.Sunrise
				_ = m.schedule.times.Sunset
				_ = m.schedule.condition
				m.scheduleMutex.RUnlock()
			}
		}()
	}

	for i := 0; i < goroutines/2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				m.scheduleMutex.Lock()
				m.schedule.times.Dawn = time.Now()
				m.schedule.times.Sunrise = time.Now().Add(time.Hour)
				m.schedule.condition = SunNormal
				m.scheduleMutex.Unlock()
			}
		}()
	}
	wg.Wait()
}
func TestInterpolate_EdgeCases(t *testing.T) {
now := time.Now()
tests := []struct {
name string
now time.Time
start time.Time
stop time.Time
expected float64
}{
{
name: "same start and stop",
now: now,
start: now,
stop: now,
expected: 1.0,
},
{
name: "now before start",
now: now,
start: now.Add(time.Hour),
stop: now.Add(2 * time.Hour),
expected: 0.0,
},
{
name: "now after stop",
now: now.Add(3 * time.Hour),
start: now,
stop: now.Add(time.Hour),
expected: 1.0,
},
{
name: "now at midpoint",
now: now.Add(30 * time.Minute),
start: now,
stop: now.Add(time.Hour),
expected: 0.5,
},
{
name: "now equals start",
now: now,
start: now,
stop: now.Add(time.Hour),
expected: 0.0,
},
{
name: "now equals stop",
now: now.Add(time.Hour),
start: now,
stop: now.Add(time.Hour),
expected: 1.0,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := interpolate(tt.now, tt.start, tt.stop)
assert.InDelta(t, tt.expected, result, 0.01)
})
}
}
// TestGenerateGammaRamp_ZeroSize: a zero-sized ramp request must yield empty
// channel slices for all three color channels.
func TestGenerateGammaRamp_ZeroSize(t *testing.T) {
	result := GenerateGammaRamp(0, 5000, 1.0)
	assert.Len(t, result.Red, 0)
	assert.Len(t, result.Green, 0)
	assert.Len(t, result.Blue, 0)
}
// TestGenerateGammaRamp_ValidSizes: for every combination of ramp size, color
// temperature and gamma, each channel slice must have exactly `size` entries.
func TestGenerateGammaRamp_ValidSizes(t *testing.T) {
	for _, size := range []uint32{1, 256, 1024} {
		for _, kelvin := range []int{1000, 4000, 6500, 10000} {
			for _, g := range []float64{0.5, 1.0, 2.0} {
				ramp := GenerateGammaRamp(size, kelvin, g)
				want := int(size)
				assert.Len(t, ramp.Red, want)
				assert.Len(t, ramp.Green, want)
				assert.Len(t, ramp.Blue, want)
			}
		}
	}
}
// TestNotifySubscribers_NonBlocking: repeated notifications must coalesce into
// the single-slot dirty channel instead of blocking the caller.
func TestNotifySubscribers_NonBlocking(t *testing.T) {
	mgr := &Manager{dirty: make(chan struct{}, 1)}
	for n := 0; n < 10; n++ {
		mgr.notifySubscribers()
	}
	assert.Len(t, mgr.dirty, 1)
}
// TestNewManager_GetRegistryError: NewManager must surface a registry
// acquisition failure from the display as a wrapped error.
func TestNewManager_GetRegistryError(t *testing.T) {
	mockDisplay := mocks_wlclient.NewMockWaylandDisplay(t)
	mockDisplay.EXPECT().Context().Return(nil)
	mockDisplay.EXPECT().GetRegistry().Return(nil, errors.New("failed to get registry"))

	config := DefaultConfig()
	_, err := NewManager(mockDisplay, config)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "get registry")
}
// TestNewManager_InvalidConfig: NewManager validates its config first, so an
// invalid Config must be rejected without any display interaction (the mock
// has no expectations set).
func TestNewManager_InvalidConfig(t *testing.T) {
	display := mocks_wlclient.NewMockWaylandDisplay(t)
	cfg := Config{
		LowTemp:  500,
		HighTemp: 6500,
		Gamma:    1.0,
	}
	_, err := NewManager(display, cfg)
	assert.Error(t, err)
}
+1 -1
View File
@@ -65,7 +65,7 @@ type Manager struct {
state *State state *State
stateMutex sync.RWMutex stateMutex sync.RWMutex
display *wlclient.Display display wlclient.WaylandDisplay
ctx *wlclient.Context ctx *wlclient.Context
registry *wlclient.Registry registry *wlclient.Registry
gammaControl any gammaControl any
+51 -4
View File
@@ -1,18 +1,32 @@
package wlcontext package wlcontext
import ( import (
"errors"
"fmt" "fmt"
"os"
"sync" "sync"
"time"
"github.com/AvengeMedia/DankMaterialShell/core/internal/errdefs" "github.com/AvengeMedia/DankMaterialShell/core/internal/errdefs"
"github.com/AvengeMedia/DankMaterialShell/core/internal/log" "github.com/AvengeMedia/DankMaterialShell/core/internal/log"
wlclient "github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/wayland/client" wlclient "github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/wayland/client"
) )
// WaylandContext abstracts the shared Wayland connection so consumers (and
// tests) can depend on an interface rather than the concrete SharedContext.
type WaylandContext interface {
	// Display returns the underlying Wayland display connection.
	Display() *wlclient.Display
	// Post queues fn to run on the event-dispatch goroutine; delivery is
	// best-effort (see SharedContext.Post).
	Post(fn func())
	// FatalError exposes a channel that receives unrecoverable connection errors.
	FatalError() <-chan error
	// Start launches event dispatching; Close shuts the context down.
	Start()
	Close()
}

// Compile-time assertion that *SharedContext implements WaylandContext.
var _ WaylandContext = (*SharedContext)(nil)
type SharedContext struct { type SharedContext struct {
display *wlclient.Display display *wlclient.Display
stopChan chan struct{} stopChan chan struct{}
fatalError chan error fatalError chan error
cmdQueue chan func()
wg sync.WaitGroup wg sync.WaitGroup
mu sync.Mutex mu sync.Mutex
started bool started bool
@@ -28,6 +42,7 @@ func New() (*SharedContext, error) {
display: display, display: display,
stopChan: make(chan struct{}), stopChan: make(chan struct{}),
fatalError: make(chan error, 1), fatalError: make(chan error, 1),
cmdQueue: make(chan func(), 256),
started: false, started: false,
} }
@@ -51,6 +66,13 @@ func (sc *SharedContext) Display() *wlclient.Display {
return sc.display return sc.display
} }
// Post enqueues fn for execution on the event-dispatcher goroutine.
// It never blocks: when cmdQueue is full the function is silently dropped,
// so callers must not rely on guaranteed delivery.
func (sc *SharedContext) Post(fn func()) {
	select {
	case sc.cmdQueue <- fn:
	default:
	}
}
func (sc *SharedContext) FatalError() <-chan error { func (sc *SharedContext) FatalError() <-chan error {
return sc.fatalError return sc.fatalError
} }
@@ -74,10 +96,35 @@ func (sc *SharedContext) eventDispatcher() {
case <-sc.stopChan: case <-sc.stopChan:
return return
default: default:
if err := ctx.Dispatch(); err != nil { }
log.Errorf("Wayland connection error: %v", err)
return sc.drainCmdQueue()
}
if err := ctx.SetReadDeadline(time.Now().Add(50 * time.Millisecond)); err != nil {
log.Errorf("Failed to set read deadline: %v", err)
}
err := ctx.Dispatch()
if err := ctx.SetReadDeadline(time.Time{}); err != nil {
log.Errorf("Failed to clear read deadline: %v", err)
}
switch {
case err == nil:
case errors.Is(err, os.ErrDeadlineExceeded):
default:
log.Errorf("Wayland connection error: %v", err)
return
}
}
}
func (sc *SharedContext) drainCmdQueue() {
for {
select {
case fn := <-sc.cmdQueue:
fn()
default:
return
} }
} }
} }
@@ -0,0 +1,127 @@
package wlcontext
import (
"sync"
"testing"
"github.com/stretchr/testify/assert"
)
// TestSharedContext_ConcurrentPostNonBlocking: Post must never block even when
// no dispatcher is draining the queue — this test completes only if Post drops
// commands once the queue fills.
func TestSharedContext_ConcurrentPostNonBlocking(t *testing.T) {
	sc := &SharedContext{
		cmdQueue: make(chan func(), 256),
		stopChan: make(chan struct{}),
	}

	var wg sync.WaitGroup
	const goroutines = 100
	const iterations = 50

	for i := 0; i < goroutines; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				sc.Post(func() {
					_ = id + j
				})
			}
		}(i)
	}
	wg.Wait()
}
// TestSharedContext_PostQueueFull: posting beyond the queue capacity must drop
// the overflow silently rather than block; exactly capacity entries remain.
func TestSharedContext_PostQueueFull(t *testing.T) {
	ctx := &SharedContext{
		cmdQueue: make(chan func(), 2),
		stopChan: make(chan struct{}),
	}
	for n := 0; n < 4; n++ {
		ctx.Post(func() {})
	}
	assert.Len(t, ctx.cmdQueue, 2)
}
// TestSharedContext_StartMultipleTimes: concurrent Start calls must be
// idempotent — the started flag ends up true and no panic/race occurs.
// NOTE(review): this relies on Start tolerating a SharedContext with no
// display; confirm Start guards on `started` before touching the connection.
func TestSharedContext_StartMultipleTimes(t *testing.T) {
	sc := &SharedContext{
		cmdQueue: make(chan func(), 256),
		stopChan: make(chan struct{}),
		started:  false,
	}

	var wg sync.WaitGroup
	const goroutines = 10

	for i := 0; i < goroutines; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			sc.Start()
		}()
	}
	wg.Wait()

	assert.True(t, sc.started)
}
// TestSharedContext_DrainCmdQueue: drainCmdQueue must run every queued
// function exactly once and leave the queue empty.
func TestSharedContext_DrainCmdQueue(t *testing.T) {
	ctx := &SharedContext{
		cmdQueue: make(chan func(), 256),
		stopChan: make(chan struct{}),
	}

	executed := 0
	for n := 0; n < 10; n++ {
		ctx.cmdQueue <- func() { executed++ }
	}
	ctx.drainCmdQueue()

	assert.Equal(t, 10, executed)
	assert.Len(t, ctx.cmdQueue, 0)
}
// TestSharedContext_DrainCmdQueueEmpty: draining an already-empty queue is a
// no-op and must not block.
func TestSharedContext_DrainCmdQueueEmpty(t *testing.T) {
	ctx := &SharedContext{
		cmdQueue: make(chan func(), 256),
		stopChan: make(chan struct{}),
	}
	ctx.drainCmdQueue()
	assert.Len(t, ctx.cmdQueue, 0)
}
// TestSharedContext_ConcurrentDrainAndPost runs a producer (Post) against a
// consumer (drainCmdQueue) concurrently; passes when no race is detected.
func TestSharedContext_ConcurrentDrainAndPost(t *testing.T) {
	sc := &SharedContext{
		cmdQueue: make(chan func(), 256),
		stopChan: make(chan struct{}),
	}

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < 100; i++ {
			sc.Post(func() {})
		}
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < 50; i++ {
			sc.drainCmdQueue()
		}
	}()

	wg.Wait()
}
+1 -1
View File
@@ -9,7 +9,7 @@ import (
wlclient "github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/wayland/client" wlclient "github.com/AvengeMedia/DankMaterialShell/core/pkg/go-wayland/wayland/client"
) )
func NewManager(display *wlclient.Display) (*Manager, error) { func NewManager(display wlclient.WaylandDisplay) (*Manager, error) {
m := &Manager{ m := &Manager{
display: display, display: display,
ctx: display.Context(), ctx: display.Context(),
@@ -0,0 +1,414 @@
package wlroutput
import (
"errors"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
mocks_wlclient "github.com/AvengeMedia/DankMaterialShell/core/internal/mocks/wlclient"
)
// Two nil states are deliberately reported as "changed" (defensive default).
func TestStateChanged_BothNil(t *testing.T) {
	assert.True(t, stateChanged(nil, nil))
}
// A nil state on either side must register as a change.
func TestStateChanged_OneNil(t *testing.T) {
	populated := &State{Serial: 1}
	assert.True(t, stateChanged(populated, nil))
	assert.True(t, stateChanged(nil, populated))
}
// Differing serial numbers alone must count as a change.
func TestStateChanged_SerialDiffers(t *testing.T) {
	prev := &State{Serial: 1, Outputs: []Output{}}
	next := &State{Serial: 2, Outputs: []Output{}}
	assert.True(t, stateChanged(prev, next))
}
// A differing number of outputs must count as a change.
func TestStateChanged_OutputCountDiffers(t *testing.T) {
	prev := &State{Serial: 1, Outputs: []Output{{Name: "eDP-1"}}}
	next := &State{Serial: 1, Outputs: []Output{}}
	assert.True(t, stateChanged(prev, next))
}
// A renamed output must count as a change.
func TestStateChanged_OutputNameDiffers(t *testing.T) {
	prev := &State{Serial: 1, Outputs: []Output{{Name: "eDP-1", Enabled: true}}}
	next := &State{Serial: 1, Outputs: []Output{{Name: "HDMI-A-1", Enabled: true}}}
	assert.True(t, stateChanged(prev, next))
}
// Toggling an output's enabled flag must count as a change.
func TestStateChanged_OutputEnabledDiffers(t *testing.T) {
	prev := &State{Serial: 1, Outputs: []Output{{Name: "eDP-1", Enabled: true}}}
	next := &State{Serial: 1, Outputs: []Output{{Name: "eDP-1", Enabled: false}}}
	assert.True(t, stateChanged(prev, next))
}
// Moving an output (X/Y) must count as a change.
func TestStateChanged_OutputPositionDiffers(t *testing.T) {
	prev := &State{Serial: 1, Outputs: []Output{{Name: "eDP-1", X: 0, Y: 0}}}
	next := &State{Serial: 1, Outputs: []Output{{Name: "eDP-1", X: 1920, Y: 0}}}
	assert.True(t, stateChanged(prev, next))
}
// A different transform value must count as a change.
func TestStateChanged_OutputTransformDiffers(t *testing.T) {
	prev := &State{Serial: 1, Outputs: []Output{{Name: "eDP-1", Transform: 0}}}
	next := &State{Serial: 1, Outputs: []Output{{Name: "eDP-1", Transform: 1}}}
	assert.True(t, stateChanged(prev, next))
}
// A different scale factor must count as a change.
func TestStateChanged_OutputScaleDiffers(t *testing.T) {
	prev := &State{Serial: 1, Outputs: []Output{{Name: "eDP-1", Scale: 1.0}}}
	next := &State{Serial: 1, Outputs: []Output{{Name: "eDP-1", Scale: 2.0}}}
	assert.True(t, stateChanged(prev, next))
}
// A different adaptive-sync setting must count as a change.
func TestStateChanged_OutputAdaptiveSyncDiffers(t *testing.T) {
	prev := &State{Serial: 1, Outputs: []Output{{Name: "eDP-1", AdaptiveSync: 0}}}
	next := &State{Serial: 1, Outputs: []Output{{Name: "eDP-1", AdaptiveSync: 1}}}
	assert.True(t, stateChanged(prev, next))
}
// A CurrentMode transitioning between nil and non-nil must count as a change.
func TestStateChanged_CurrentModeNilVsNonNil(t *testing.T) {
	prev := &State{Serial: 1, Outputs: []Output{{Name: "eDP-1", CurrentMode: nil}}}
	next := &State{Serial: 1, Outputs: []Output{{Name: "eDP-1", CurrentMode: &OutputMode{Width: 1920}}}}
	assert.True(t, stateChanged(prev, next))
}
// TestStateChanged_CurrentModeDiffers: a resolution difference, and then a
// refresh-rate-only difference, in CurrentMode must each be detected.
func TestStateChanged_CurrentModeDiffers(t *testing.T) {
	a := &State{Serial: 1, Outputs: []Output{{
		Name:        "eDP-1",
		CurrentMode: &OutputMode{Width: 1920, Height: 1080, Refresh: 60000},
	}}}
	b := &State{Serial: 1, Outputs: []Output{{
		Name:        "eDP-1",
		CurrentMode: &OutputMode{Width: 2560, Height: 1440, Refresh: 60000},
	}}}
	assert.True(t, stateChanged(a, b))

	// Restore b's resolution to match a, leaving only the refresh differing.
	b.Outputs[0].CurrentMode.Width = 1920
	b.Outputs[0].CurrentMode.Height = 1080
	b.Outputs[0].CurrentMode.Refresh = 144000
	assert.True(t, stateChanged(a, b))
}
// A differing number of advertised modes must count as a change.
func TestStateChanged_ModesLengthDiffers(t *testing.T) {
	prev := &State{Serial: 1, Outputs: []Output{{Name: "eDP-1", Modes: []OutputMode{{Width: 1920}}}}}
	next := &State{Serial: 1, Outputs: []Output{{Name: "eDP-1", Modes: []OutputMode{{Width: 1920}, {Width: 1280}}}}}
	assert.True(t, stateChanged(prev, next))
}
// TestStateChanged_Equal: two fully populated, field-for-field identical
// states must compare as unchanged. Both states reference the same OutputMode
// value, exercising the happy path of every compared field at once.
func TestStateChanged_Equal(t *testing.T) {
	mode := OutputMode{Width: 1920, Height: 1080, Refresh: 60000, Preferred: true}

	a := &State{
		Serial: 5,
		Outputs: []Output{{
			Name:           "eDP-1",
			Description:    "Built-in display",
			Make:           "BOE",
			Model:          "0x0ABC",
			SerialNumber:   "12345",
			PhysicalWidth:  309,
			PhysicalHeight: 174,
			Enabled:        true,
			X:              0,
			Y:              0,
			Transform:      0,
			Scale:          1.0,
			CurrentMode:    &mode,
			Modes:          []OutputMode{mode},
			AdaptiveSync:   0,
		}},
	}
	b := &State{
		Serial: 5,
		Outputs: []Output{{
			Name:           "eDP-1",
			Description:    "Built-in display",
			Make:           "BOE",
			Model:          "0x0ABC",
			SerialNumber:   "12345",
			PhysicalWidth:  309,
			PhysicalHeight: 174,
			Enabled:        true,
			X:              0,
			Y:              0,
			Transform:      0,
			Scale:          1.0,
			CurrentMode:    &mode,
			Modes:          []OutputMode{mode},
			AdaptiveSync:   0,
		}},
	}

	assert.False(t, stateChanged(a, b))
}
// TestManager_ConcurrentGetState runs GetState readers concurrently with
// writers replacing m.state under stateMutex (wlroutput counterpart of the
// gamma-manager test of the same name).
func TestManager_ConcurrentGetState(t *testing.T) {
	m := &Manager{
		state: &State{
			Serial:  1,
			Outputs: []Output{{Name: "eDP-1", Enabled: true}},
		},
	}

	var wg sync.WaitGroup
	const goroutines = 50
	const iterations = 100

	for i := 0; i < goroutines/2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				s := m.GetState()
				_ = s.Serial
				_ = s.Outputs
			}
		}()
	}

	for i := 0; i < goroutines/2; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				m.stateMutex.Lock()
				m.state = &State{
					Serial:  uint32(j),
					Outputs: []Output{{Name: "eDP-1", Scale: float64(j % 3)}},
				}
				m.stateMutex.Unlock()
			}
		}(i)
	}
	wg.Wait()
}
// TestManager_ConcurrentSubscriberAccess exercises Subscribe/Unsubscribe from
// many goroutines at once; passes when no race or panic occurs.
func TestManager_ConcurrentSubscriberAccess(t *testing.T) {
	m := &Manager{
		stopChan: make(chan struct{}),
		dirty:    make(chan struct{}, 1),
	}

	var wg sync.WaitGroup
	const goroutines = 20

	for i := 0; i < goroutines; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			// Unique one-letter subscriber ID per goroutine (id < 26).
			subID := string(rune('a' + id))
			ch := m.Subscribe(subID)
			assert.NotNil(t, ch)
			time.Sleep(time.Millisecond)
			m.Unsubscribe(subID)
		}(i)
	}
	wg.Wait()
}
// TestManager_SyncmapHeadsConcurrentAccess hammers the heads syncmap with
// concurrent Store/Load/Range/Delete, one key per goroutine.
func TestManager_SyncmapHeadsConcurrentAccess(t *testing.T) {
	m := &Manager{}

	var wg sync.WaitGroup
	const goroutines = 30
	const iterations = 50

	for i := 0; i < goroutines; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			key := uint32(id)
			for j := 0; j < iterations; j++ {
				state := &headState{
					id:      key,
					name:    "test-head",
					enabled: j%2 == 0,
					scale:   float64(j % 3),
					modeIDs: []uint32{uint32(j)},
				}
				m.heads.Store(key, state)
				if loaded, ok := m.heads.Load(key); ok {
					assert.Equal(t, key, loaded.id)
				}
				// Range also walks entries owned by sibling goroutines.
				m.heads.Range(func(k uint32, v *headState) bool {
					_ = v.name
					_ = v.enabled
					return true
				})
			}
			m.heads.Delete(key)
		}(i)
	}
	wg.Wait()
}
// TestManager_SyncmapModesConcurrentAccess hammers the modes syncmap with
// concurrent Store/Load/Range/Delete, one key per goroutine.
func TestManager_SyncmapModesConcurrentAccess(t *testing.T) {
	m := &Manager{}

	var wg sync.WaitGroup
	const goroutines = 30
	const iterations = 50

	for i := 0; i < goroutines; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			key := uint32(id)
			for j := 0; j < iterations; j++ {
				state := &modeState{
					id:        key,
					width:     int32(1920 + j),
					height:    int32(1080 + j),
					refresh:   60000,
					preferred: j == 0,
				}
				m.modes.Store(key, state)
				if loaded, ok := m.modes.Load(key); ok {
					assert.Equal(t, key, loaded.id)
				}
				// Range also walks entries owned by sibling goroutines.
				m.modes.Range(func(k uint32, v *modeState) bool {
					_ = v.width
					_ = v.height
					return true
				})
			}
			m.modes.Delete(key)
		}(i)
	}
	wg.Wait()
}
// TestManager_NotifySubscribersNonBlocking: repeated notifications must
// coalesce into the single-slot dirty channel instead of blocking.
func TestManager_NotifySubscribersNonBlocking(t *testing.T) {
	mgr := &Manager{dirty: make(chan struct{}, 1)}
	for n := 0; n < 10; n++ {
		mgr.notifySubscribers()
	}
	assert.Len(t, mgr.dirty, 1)
}
// TestManager_PostQueueFull: posting beyond the command-queue capacity must
// drop silently rather than block; exactly capacity entries remain queued.
func TestManager_PostQueueFull(t *testing.T) {
	mgr := &Manager{
		cmdq:     make(chan cmd, 2),
		stopChan: make(chan struct{}),
	}
	for n := 0; n < 4; n++ {
		mgr.post(func() {})
	}
	assert.Len(t, mgr.cmdq, 2)
}
// TestManager_GetStateNilState: GetState on a zero-value Manager must return a
// usable empty state (non-nil Outputs, zero Serial) rather than panic.
func TestManager_GetStateNilState(t *testing.T) {
	var mgr Manager
	got := mgr.GetState()
	assert.NotNil(t, got.Outputs)
	assert.Equal(t, uint32(0), got.Serial)
}
// TestManager_FatalErrorChannel: FatalError must expose the internal buffered
// error channel so a queued error can be received by callers.
func TestManager_FatalErrorChannel(t *testing.T) {
	m := &Manager{
		fatalError: make(chan error, 1),
	}
	ch := m.FatalError()
	assert.NotNil(t, ch)

	m.fatalError <- assert.AnError
	err := <-ch
	assert.Error(t, err)
}
// TestOutputMode_Fields is a smoke test pinning the OutputMode field names and
// types so accidental struct changes break loudly.
func TestOutputMode_Fields(t *testing.T) {
	mode := OutputMode{
		Width:     1920,
		Height:    1080,
		Refresh:   60000,
		Preferred: true,
		ID:        42,
	}
	assert.Equal(t, int32(1920), mode.Width)
	assert.Equal(t, int32(1080), mode.Height)
	assert.Equal(t, int32(60000), mode.Refresh)
	assert.True(t, mode.Preferred)
	assert.Equal(t, uint32(42), mode.ID)
}
// TestOutput_Fields is a smoke test pinning a representative subset of the
// Output struct's field names and types.
func TestOutput_Fields(t *testing.T) {
	out := Output{
		Name:           "eDP-1",
		Description:    "Built-in display",
		Make:           "BOE",
		Model:          "0x0ABC",
		SerialNumber:   "12345",
		PhysicalWidth:  309,
		PhysicalHeight: 174,
		Enabled:        true,
		X:              0,
		Y:              0,
		Transform:      0,
		Scale:          1.5,
		AdaptiveSync:   1,
		ID:             1,
	}
	assert.Equal(t, "eDP-1", out.Name)
	assert.Equal(t, "Built-in display", out.Description)
	assert.True(t, out.Enabled)
	assert.Equal(t, float64(1.5), out.Scale)
	assert.Equal(t, uint32(1), out.AdaptiveSync)
}
// TestHeadState_ModeIDsSlice: appending to a headState's modeIDs slice must
// grow it in order, starting from an empty slice.
func TestHeadState_ModeIDsSlice(t *testing.T) {
	head := &headState{
		id:      1,
		modeIDs: []uint32{},
	}
	head.modeIDs = append(head.modeIDs, 1, 2, 3)

	assert.Len(t, head.modeIDs, 3)
	assert.Equal(t, uint32(1), head.modeIDs[0])
}
// Two outputs that both lack a CurrentMode must compare as unchanged.
func TestStateChanged_BothCurrentModeNil(t *testing.T) {
	prev := &State{Serial: 1, Outputs: []Output{{Name: "eDP-1", CurrentMode: nil}}}
	next := &State{Serial: 1, Outputs: []Output{{Name: "eDP-1", CurrentMode: nil}}}
	assert.False(t, stateChanged(prev, next))
}
// NOTE(review): despite the name, this only exercises the output-count
// mismatch path (1 vs 2 outputs) — i.e. stateChanged must report a change
// without indexing past the shorter slice. Consider renaming to reflect that.
func TestStateChanged_IndexOutOfBounds(t *testing.T) {
	a := &State{Serial: 1, Outputs: []Output{{Name: "eDP-1"}}}
	b := &State{Serial: 1, Outputs: []Output{{Name: "eDP-1"}, {Name: "HDMI-A-1"}}}
	assert.True(t, stateChanged(a, b))
}
// TestNewManager_GetRegistryError: NewManager must surface a registry
// acquisition failure from the display as an error.
func TestNewManager_GetRegistryError(t *testing.T) {
	mockDisplay := mocks_wlclient.NewMockWaylandDisplay(t)
	mockDisplay.EXPECT().Context().Return(nil)
	mockDisplay.EXPECT().GetRegistry().Return(nil, errors.New("failed to get registry"))

	_, err := NewManager(mockDisplay)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "failed to get registry")
}
+1 -1
View File
@@ -45,7 +45,7 @@ type cmd struct {
} }
type Manager struct { type Manager struct {
display *wlclient.Display display wlclient.WaylandDisplay
ctx *wlclient.Context ctx *wlclient.Context
registry *wlclient.Registry registry *wlclient.Registry
manager *wlr_output_management.ZwlrOutputManagerV1 manager *wlr_output_management.ZwlrOutputManagerV1
-30
View File
@@ -20,33 +20,3 @@ func ExpandPath(path string) (string, error) {
return expanded, nil return expanded, nil
} }
func XDGConfigHome() string {
if configHome := os.Getenv("XDG_CONFIG_HOME"); configHome != "" {
return configHome
}
if home, err := os.UserHomeDir(); err == nil {
return filepath.Join(home, ".config")
}
return filepath.Join(os.TempDir(), ".config")
}
func XDGCacheHome() string {
if cacheHome := os.Getenv("XDG_CACHE_HOME"); cacheHome != "" {
return cacheHome
}
if home, err := os.UserHomeDir(); err == nil {
return filepath.Join(home, ".cache")
}
return filepath.Join(os.TempDir(), ".cache")
}
func XDGDataHome() string {
if dataHome := os.Getenv("XDG_DATA_HOME"); dataHome != "" {
return dataHome
}
if home, err := os.UserHomeDir(); err == nil {
return filepath.Join(home, ".local", "share")
}
return filepath.Join(os.TempDir(), ".local", "share")
}
-63
View File
@@ -41,66 +41,3 @@ func TestExpandPathAbsolute(t *testing.T) {
t.Errorf("expected /absolute/path, got %s", result) t.Errorf("expected /absolute/path, got %s", result)
} }
} }
func TestXDGConfigHomeDefault(t *testing.T) {
t.Setenv("XDG_CONFIG_HOME", "")
home, err := os.UserHomeDir()
if err != nil {
t.Skip("no home directory")
}
result := XDGConfigHome()
expected := filepath.Join(home, ".config")
if result != expected {
t.Errorf("expected %s, got %s", expected, result)
}
}
func TestXDGConfigHomeCustom(t *testing.T) {
t.Setenv("XDG_CONFIG_HOME", "/custom/config")
result := XDGConfigHome()
if result != "/custom/config" {
t.Errorf("expected /custom/config, got %s", result)
}
}
func TestXDGCacheHomeDefault(t *testing.T) {
t.Setenv("XDG_CACHE_HOME", "")
home, err := os.UserHomeDir()
if err != nil {
t.Skip("no home directory")
}
result := XDGCacheHome()
expected := filepath.Join(home, ".cache")
if result != expected {
t.Errorf("expected %s, got %s", expected, result)
}
}
func TestXDGCacheHomeCustom(t *testing.T) {
t.Setenv("XDG_CACHE_HOME", "/custom/cache")
result := XDGCacheHome()
if result != "/custom/cache" {
t.Errorf("expected /custom/cache, got %s", result)
}
}
func TestXDGDataHomeDefault(t *testing.T) {
t.Setenv("XDG_DATA_HOME", "")
home, err := os.UserHomeDir()
if err != nil {
t.Skip("no home directory")
}
result := XDGDataHome()
expected := filepath.Join(home, ".local", "share")
if result != expected {
t.Errorf("expected %s, got %s", expected, result)
}
}
func TestXDGDataHomeCustom(t *testing.T) {
t.Setenv("XDG_DATA_HOME", "/custom/data")
result := XDGDataHome()
if result != "/custom/data" {
t.Errorf("expected /custom/data, got %s", result)
}
}
+3 -3
View File
@@ -1,9 +1,9 @@
#!/bin/sh #!/usr/bin/env sh
# Runs go generate for each directory, but in parallel. Any arguments are appended to the # Runs go generate for each directory, but in parallel. Any arguments are appended to the
# go generate command. # go generate command.
# Usage: $ ./generatep [go generate arguments] # Usage: $ ./generatep [go generate arguments]
# Print all generate commands: $ ./generatep -x # Print all generate commands: $ ./generatep -x
cd ./wayland cd ./wayland || exit 1
find . -type f -name '*.go' -exec dirname {} \; | sort -u | parallel -j 0 go generate $1 {}/. find . -type f -name '*.go' -exec dirname {} \; | sort -u | parallel -j 0 go generate "$1" {}/.
@@ -15,6 +15,15 @@ type Proxy interface {
MarkZombie() MarkZombie()
} }
type WaylandDisplay interface {
Context() *Context
GetRegistry() (*Registry, error)
Roundtrip() error
Destroy() error
}
var _ WaylandDisplay = (*Display)(nil)
type BaseProxy struct { type BaseProxy struct {
ctx *Context ctx *Context
id uint32 id uint32
@@ -6,6 +6,7 @@ import (
"net" "net"
"os" "os"
"sync" "sync"
"time"
"github.com/AvengeMedia/DankMaterialShell/core/pkg/syncmap" "github.com/AvengeMedia/DankMaterialShell/core/pkg/syncmap"
) )
@@ -28,6 +29,12 @@ func (ctx *Context) Register(p Proxy) {
ctx.objects.Store(id, p) ctx.objects.Store(id, p)
} }
func (ctx *Context) RegisterWithID(p Proxy, id uint32) {
p.SetID(id)
p.SetContext(ctx)
ctx.objects.Store(id, p)
}
func (ctx *Context) Unregister(p Proxy) { func (ctx *Context) Unregister(p Proxy) {
ctx.objects.Delete(p.ID()) ctx.objects.Delete(p.ID())
} }
@@ -47,6 +54,10 @@ func (ctx *Context) Close() error {
return ctx.conn.Close() return ctx.conn.Close()
} }
func (ctx *Context) SetReadDeadline(t time.Time) error {
return ctx.conn.SetReadDeadline(t)
}
// Dispatch reads and processes incoming messages and calls [client.Dispatcher.Dispatch] on the // Dispatch reads and processes incoming messages and calls [client.Dispatcher.Dispatch] on the
// respective wayland protocol. // respective wayland protocol.
// Dispatch must be called on the same goroutine as other interactions with the Context. // Dispatch must be called on the same goroutine as other interactions with the Context.
+2 -1
View File
@@ -1,5 +1,6 @@
dms-git (0.6.2+git2419.993f14a3) nightly; urgency=medium dms-git (1.0.0+git2419.993f14a3) nightly; urgency=medium
* Major stable release v1.0.0
* widgets: make dank icon picker a popup * widgets: make dank icon picker a popup
* Previous updates included in build * Previous updates included in build
+3 -3
View File
@@ -3,19 +3,19 @@
<service name="download_url"> <service name="download_url">
<param name="protocol">https</param> <param name="protocol">https</param>
<param name="host">github.com</param> <param name="host">github.com</param>
<param name="path">/AvengeMedia/DankMaterialShell/archive/refs/tags/v0.6.2.tar.gz</param> <param name="path">/AvengeMedia/DankMaterialShell/archive/refs/tags/v1.0.2.tar.gz</param>
<param name="filename">dms-source.tar.gz</param> <param name="filename">dms-source.tar.gz</param>
</service> </service>
<!-- Download amd64 binary --> <!-- Download amd64 binary -->
<service name="download_url"> <service name="download_url">
<param name="protocol">https</param> <param name="protocol">https</param>
<param name="host">github.com</param> <param name="host">github.com</param>
<param name="path">/AvengeMedia/DankMaterialShell/releases/download/v0.6.2/dms-distropkg-amd64.gz</param> <param name="path">/AvengeMedia/DankMaterialShell/releases/download/v1.0.2/dms-distropkg-amd64.gz</param>
</service> </service>
<!-- Download arm64 binary --> <!-- Download arm64 binary -->
<service name="download_url"> <service name="download_url">
<param name="protocol">https</param> <param name="protocol">https</param>
<param name="host">github.com</param> <param name="host">github.com</param>
<param name="path">/AvengeMedia/DankMaterialShell/releases/download/v0.6.2/dms-distropkg-arm64.gz</param> <param name="path">/AvengeMedia/DankMaterialShell/releases/download/v1.0.2/dms-distropkg-arm64.gz</param>
</service> </service>
</services> </services>
+10 -2
View File
@@ -1,6 +1,14 @@
dms (0.6.2) stable; urgency=medium dms (1.0.2) stable; urgency=medium
* Update to v0.6.2 release * Update to v1.0.2 stable release
* Bug fixes and improvements
-- Avenge Media <AvengeMedia.US@gmail.com> Thu, 12 Dec 2025 14:30:00 -0500
dms (1.0.0) stable; urgency=medium
* Update to v1.0.0 release
* Major stable release
* Fix binary download paths for OBS builds * Fix binary download paths for OBS builds
* Native format: removed revisions * Native format: removed revisions
+12 -6
View File
@@ -36,15 +36,19 @@ override_dh_auto_build:
fi fi
chmod +x dms chmod +x dms
# Extract source if needed
if [ ! -d DankMaterialShell-$(UPSTREAM_VERSION) ]; then \ if [ ! -d DankMaterialShell-$(UPSTREAM_VERSION) ]; then \
if [ -f ../SOURCES/dms-source.tar.gz ]; then \ if [ -f ../SOURCES/dms-source.tar.gz ]; then \
tar -xzf ../SOURCES/dms-source.tar.gz; \ tar -xzf ../SOURCES/dms-source.tar.gz; \
elif [ -f dms-source.tar.gz ]; then \ elif [ -f dms-source.tar.gz ]; then \
tar -xzf dms-source.tar.gz; \ tar -xzf dms-source.tar.gz; \
fi; \ fi; \
if [ ! -d DankMaterialShell-$(UPSTREAM_VERSION) ] && [ -d DankMaterialShell-0.6.2 ]; then \ fi
mv DankMaterialShell-0.6.2 DankMaterialShell-$(UPSTREAM_VERSION); \ # Rename directory to match expected version
fi; \ SOURCE_DIR=$$(find . -maxdepth 1 -type d -name "DankMaterialShell-*" ! -name "DankMaterialShell-$(UPSTREAM_VERSION)" | head -n1); \
if [ -n "$$SOURCE_DIR" ]; then \
echo "Renaming $$SOURCE_DIR to DankMaterialShell-$(UPSTREAM_VERSION)"; \
mv "$$SOURCE_DIR" DankMaterialShell-$(UPSTREAM_VERSION); \
fi fi
@@ -52,9 +56,11 @@ override_dh_auto_install:
install -Dm755 dms debian/dms/usr/bin/dms install -Dm755 dms debian/dms/usr/bin/dms
mkdir -p debian/dms/usr/share/quickshell/dms debian/dms/usr/lib/systemd/user mkdir -p debian/dms/usr/share/quickshell/dms debian/dms/usr/lib/systemd/user
# Handle directory name mismatch again for install step if needed # Ensure directory has correct version name for install step
if [ ! -d DankMaterialShell-$(UPSTREAM_VERSION) ] && [ -d DankMaterialShell-0.6.2 ]; then \ SOURCE_DIR=$$(find . -maxdepth 1 -type d -name "DankMaterialShell-*" ! -name "DankMaterialShell-$(UPSTREAM_VERSION)" | head -n1); \
mv DankMaterialShell-0.6.2 DankMaterialShell-$(UPSTREAM_VERSION); \ if [ -n "$$SOURCE_DIR" ]; then \
echo "Renaming $$SOURCE_DIR to DankMaterialShell-$(UPSTREAM_VERSION) for install"; \
mv "$$SOURCE_DIR" DankMaterialShell-$(UPSTREAM_VERSION); \
fi fi
if [ -d DankMaterialShell-$(UPSTREAM_VERSION) ]; then \ if [ -d DankMaterialShell-$(UPSTREAM_VERSION) ]; then \
cp -r DankMaterialShell-$(UPSTREAM_VERSION)/quickshell/* debian/dms/usr/share/quickshell/dms/; \ cp -r DankMaterialShell-$(UPSTREAM_VERSION)/quickshell/* debian/dms/usr/share/quickshell/dms/; \
-1
View File
@@ -15,7 +15,6 @@ VCS: {{{ git_repo_vcs }}}
Source0: {{{ git_repo_pack }}} Source0: {{{ git_repo_pack }}}
BuildRequires: git-core BuildRequires: git-core
BuildRequires: rpkg
# For the _tmpfilesdir macro. # For the _tmpfilesdir macro.
BuildRequires: systemd-rpm-macros BuildRequires: systemd-rpm-macros
-1
View File
@@ -16,7 +16,6 @@ VCS: {{{ git_repo_vcs }}}
Source0: {{{ git_repo_pack }}} Source0: {{{ git_repo_pack }}}
BuildRequires: git-core BuildRequires: git-core
BuildRequires: rpkg
BuildRequires: gzip BuildRequires: gzip
BuildRequires: golang >= 1.24 BuildRequires: golang >= 1.24
BuildRequires: make BuildRequires: make
+5 -18
View File
@@ -9,32 +9,19 @@ let
cfg = config.programs.dankMaterialShell; cfg = config.programs.dankMaterialShell;
in in
{ {
qmlPath = "${dmsPkgs.dms-shell}/share/quickshell/dms";
packages = [ packages = [
pkgs.material-symbols
pkgs.inter
pkgs.fira-code
pkgs.ddcutil
pkgs.libsForQt5.qt5ct
pkgs.kdePackages.qt6ct
dmsPkgs.dms-shell dmsPkgs.dms-shell
] ]
++ lib.optional cfg.enableSystemMonitoring dmsPkgs.dgop ++ lib.optional cfg.enableSystemMonitoring dmsPkgs.dgop
++ lib.optionals cfg.enableClipboard [
pkgs.cliphist
pkgs.wl-clipboard
]
++ lib.optionals cfg.enableVPN [ ++ lib.optionals cfg.enableVPN [
pkgs.glib pkgs.glib
pkgs.networkmanager pkgs.networkmanager
] ]
++ lib.optional cfg.enableBrightnessControl pkgs.brightnessctl
++ lib.optional cfg.enableColorPicker pkgs.hyprpicker
++ lib.optional cfg.enableDynamicTheming pkgs.matugen ++ lib.optional cfg.enableDynamicTheming pkgs.matugen
++ lib.optional cfg.enableAudioWavelength pkgs.cava ++ lib.optional cfg.enableAudioWavelength pkgs.cava
++ lib.optional cfg.enableCalendarEvents pkgs.khal ++ lib.optional cfg.enableCalendarEvents pkgs.khal;
++ lib.optional cfg.enableSystemSound pkgs.kdePackages.qtmultimedia;
plugins = lib.mapAttrs (name: plugin: {
source = plugin.src;
}) (lib.filterAttrs (n: v: v.enable) cfg.plugins);
} }
+26 -4
View File
@@ -139,10 +139,32 @@ in
)} )}
if [ -f session.json ]; then if [ -f session.json ]; then
if cp "$(${jq} -r '.wallpaperPath' session.json)" wallpaper.jpg; then copy_wallpaper() {
mv session.json session.orig.json local path=$(${jq} -r ".$1 // empty" session.json)
${jq} '.wallpaperPath = "${cacheDir}/wallpaper.jpg"' session.orig.json > session.json if [ -f "$path" ]; then
fi cp "$path" "$2"
${jq} ".$1 = \"${cacheDir}/$2\"" session.json > session.tmp
mv session.tmp session.json
fi
}
copy_monitor_wallpapers() {
${jq} -r ".$1 // {} | to_entries[] | .key + \":\" + .value" session.json 2>/dev/null | while IFS=: read monitor path; do
local dest="$2-$(echo "$monitor" | tr -c '[:alnum:]' '-')"
if [ -f "$path" ]; then
cp "$path" "$dest"
${jq} --arg m "$monitor" --arg p "${cacheDir}/$dest" ".$1[\$m] = \$p" session.json > session.tmp
mv session.tmp session.json
fi
done
}
copy_wallpaper "wallpaperPath" "wallpaper"
copy_wallpaper "wallpaperPathLight" "wallpaper-light"
copy_wallpaper "wallpaperPathDark" "wallpaper-dark"
copy_monitor_wallpapers "monitorWallpapers" "wallpaper-monitor"
copy_monitor_wallpapers "monitorWallpapersLight" "wallpaper-monitor-light"
copy_monitor_wallpapers "monitorWallpapersDark" "wallpaper-monitor-dark"
fi fi
if [ -f settings.json ]; then if [ -f settings.json ]; then
+3 -29
View File
@@ -44,37 +44,12 @@ in
description = "The default session are only read if the session.json file don't exist"; description = "The default session are only read if the session.json file don't exist";
}; };
}; };
plugins = lib.mkOption {
type = attrsOf (
types.submodule (
{ config, ... }:
{
options = {
enable = lib.mkOption {
type = types.bool;
default = true;
description = "Whether to link this plugin";
};
src = lib.mkOption {
type = types.path;
description = "Source to link to DMS plugins directory";
};
};
}
)
);
default = { };
description = "DMS Plugins to install";
};
}; };
config = lib.mkIf cfg.enable { config = lib.mkIf cfg.enable {
programs.quickshell = { programs.quickshell = {
enable = true; enable = true;
inherit (cfg.quickshell) package; inherit (cfg.quickshell) package;
configs.dms = common.qmlPath;
}; };
systemd.user.services.dms = lib.mkIf cfg.systemd.enable { systemd.user.services.dms = lib.mkIf cfg.systemd.enable {
@@ -82,7 +57,6 @@ in
Description = "DankMaterialShell"; Description = "DankMaterialShell";
PartOf = [ config.wayland.systemd.target ]; PartOf = [ config.wayland.systemd.target ];
After = [ config.wayland.systemd.target ]; After = [ config.wayland.systemd.target ];
X-Restart-Triggers = lib.optional cfg.systemd.restartIfChanged common.qmlPath;
}; };
Service = { Service = {
@@ -98,10 +72,10 @@ in
}; };
xdg.configFile = lib.mkMerge [ xdg.configFile = lib.mkMerge [
(lib.mapAttrs' (name: plugin: { (lib.mapAttrs' (name: value: {
name = "DankMaterialShell/plugins/${name}"; name = "DankMaterialShell/plugins/${name}";
value.source = plugin.src; inherit value;
}) (lib.filterAttrs (n: v: v.enable) cfg.plugins)) }) common.plugins)
{ {
"DankMaterialShell/default-settings.json" = lib.mkIf (cfg.default.settings != { }) { "DankMaterialShell/default-settings.json" = lib.mkIf (cfg.default.settings != { }) {
source = jsonFormat.generate "default-settings.json" cfg.default.settings; source = jsonFormat.generate "default-settings.json" cfg.default.settings;
+15 -29
View File
@@ -63,25 +63,6 @@ in
allow-when-locked = true; allow-when-locked = true;
action = dms-ipc "audio" "micmute"; action = dms-ipc "audio" "micmute";
}; };
"Mod+Alt+N" = {
allow-when-locked = true;
action = dms-ipc "night" "toggle";
hotkey-overlay.title = "Toggle Night Mode";
};
}
// lib.attrsets.optionalAttrs cfg.enableSystemMonitoring {
"Mod+M" = {
action = dms-ipc "processlist" "toggle";
hotkey-overlay.title = "Toggle Process List";
};
}
// lib.attrsets.optionalAttrs cfg.enableClipboard {
"Mod+V" = {
action = dms-ipc "clipboard" "toggle";
hotkey-overlay.title = "Toggle Clipboard Manager";
};
}
// lib.attrsets.optionalAttrs cfg.enableBrightnessControl {
"XF86MonBrightnessUp" = { "XF86MonBrightnessUp" = {
allow-when-locked = true; allow-when-locked = true;
action = dms-ipc "brightness" "increment" "5" ""; action = dms-ipc "brightness" "increment" "5" "";
@@ -90,6 +71,21 @@ in
allow-when-locked = true; allow-when-locked = true;
action = dms-ipc "brightness" "decrement" "5" ""; action = dms-ipc "brightness" "decrement" "5" "";
}; };
"Mod+Alt+N" = {
allow-when-locked = true;
action = dms-ipc "night" "toggle";
hotkey-overlay.title = "Toggle Night Mode";
};
"Mod+V" = {
action = dms-ipc "clipboard" "toggle";
hotkey-overlay.title = "Toggle Clipboard Manager";
};
}
// lib.attrsets.optionalAttrs cfg.enableSystemMonitoring {
"Mod+M" = {
action = dms-ipc "processlist" "toggle";
hotkey-overlay.title = "Toggle Process List";
};
}; };
}) })
@@ -101,16 +97,6 @@ in
"run" "run"
]; ];
} }
]
++ lib.optionals cfg.enableClipboard [
{
command = [
"wl-paste"
"--watch"
"cliphist"
"store"
];
}
]; ];
}) })
]; ];
+9 -3
View File
@@ -22,8 +22,6 @@ in
]; ];
config = lib.mkIf cfg.enable { config = lib.mkIf cfg.enable {
environment.etc."xdg/quickshell/dms".source = "${dmsPkgs.dms-shell}/share/quickshell/dms";
systemd.user.services.dms = lib.mkIf cfg.systemd.enable { systemd.user.services.dms = lib.mkIf cfg.systemd.enable {
description = "DankMaterialShell"; description = "DankMaterialShell";
path = lib.mkForce [ ]; path = lib.mkForce [ ];
@@ -31,7 +29,7 @@ in
partOf = [ "graphical-session.target" ]; partOf = [ "graphical-session.target" ];
after = [ "graphical-session.target" ]; after = [ "graphical-session.target" ];
wantedBy = [ "graphical-session.target" ]; wantedBy = [ "graphical-session.target" ];
restartTriggers = lib.optional cfg.systemd.restartIfChanged common.qmlPath; restartIfChanged = cfg.systemd.restartIfChanged;
serviceConfig = { serviceConfig = {
ExecStart = lib.getExe dmsPkgs.dms-shell + " run --session"; ExecStart = lib.getExe dmsPkgs.dms-shell + " run --session";
@@ -40,5 +38,13 @@ in
}; };
environment.systemPackages = [ cfg.quickshell.package ] ++ common.packages; environment.systemPackages = [ cfg.quickshell.package ] ++ common.packages;
environment.etc = lib.mapAttrs' (name: value: {
name = "xdg/quickshell/dms-plugins/${name}";
inherit value;
}) common.plugins;
services.power-profiles-daemon.enable = lib.mkDefault true;
services.accounts-daemon.enable = lib.mkDefault true;
}; };
} }
+51 -21
View File
@@ -5,11 +5,25 @@
}: }:
let let
inherit (lib) types; inherit (lib) types;
path = [
"programs"
"dankMaterialShell"
];
builtInRemovedMsg = "This is now built-in in DMS and doesn't need additional dependencies.";
in in
{ {
imports = [
(lib.mkRemovedOptionModule (path ++ [ "enableBrightnessControl" ]) builtInRemovedMsg)
(lib.mkRemovedOptionModule (path ++ [ "enableColorPicker" ]) builtInRemovedMsg)
(lib.mkRemovedOptionModule (path ++ [ "enableClipboard" ]) builtInRemovedMsg)
(lib.mkRemovedOptionModule (
path ++ [ "enableSystemSound" ]
) "qtmultimedia is now included on dms-shell package.")
];
options.programs.dankMaterialShell = { options.programs.dankMaterialShell = {
enable = lib.mkEnableOption "DankMaterialShell"; enable = lib.mkEnableOption "DankMaterialShell";
systemd = { systemd = {
enable = lib.mkEnableOption "DankMaterialShell systemd startup"; enable = lib.mkEnableOption "DankMaterialShell systemd startup";
restartIfChanged = lib.mkOption { restartIfChanged = lib.mkOption {
@@ -23,26 +37,11 @@ in
default = true; default = true;
description = "Add needed dependencies to use system monitoring widgets"; description = "Add needed dependencies to use system monitoring widgets";
}; };
enableClipboard = lib.mkOption {
type = types.bool;
default = true;
description = "Add needed dependencies to use the clipboard widget";
};
enableVPN = lib.mkOption { enableVPN = lib.mkOption {
type = types.bool; type = types.bool;
default = true; default = true;
description = "Add needed dependencies to use the VPN widget"; description = "Add needed dependencies to use the VPN widget";
}; };
enableBrightnessControl = lib.mkOption {
type = types.bool;
default = true;
description = "Add needed dependencies to have brightness/backlight support";
};
enableColorPicker = lib.mkOption {
type = types.bool;
default = true;
description = "Add needed dependencies to have color picking support";
};
enableDynamicTheming = lib.mkOption { enableDynamicTheming = lib.mkOption {
type = types.bool; type = types.bool;
default = true; default = true;
@@ -58,15 +57,46 @@ in
default = true; default = true;
description = "Add calendar events support via khal"; description = "Add calendar events support via khal";
}; };
enableSystemSound = lib.mkOption {
type = types.bool;
default = true;
description = "Add needed dependencies to have system sound support";
};
quickshell = { quickshell = {
package = lib.mkPackageOption dmsPkgs "quickshell" { package = lib.mkPackageOption dmsPkgs "quickshell" {
extraDescription = "The quickshell package to use (defaults to be built from source, in the commit 26531f due to unreleased features used by DMS)."; extraDescription = "The quickshell package to use (defaults to be built from source, in the commit 26531f due to unreleased features used by DMS).";
}; };
}; };
plugins = lib.mkOption {
type = types.attrsOf (
types.submodule {
options = {
enable = lib.mkOption {
type = types.bool;
default = true;
description = "Whether to enable this plugin";
};
src = lib.mkOption {
type = types.either types.package types.path;
description = "Source of the plugin package or path";
};
};
}
);
default = { };
description = "DMS Plugins to install and enable";
example = lib.literalExpression ''
{
DockerManager = {
src = pkgs.fetchFromGitHub {
owner = "LuckShiba";
repo = "DmsDockerManager";
rev = "v1.2.0";
sha256 = "sha256-VoJCaygWnKpv0s0pqTOmzZnPM922qPDMHk4EPcgVnaU=";
};
};
AnotherPlugin = {
enable = true;
src = pkgs.another-plugin;
};
}
'';
};
}; };
} }
+5 -1
View File
@@ -3,7 +3,7 @@
%global debug_package %{nil} %global debug_package %{nil}
Name: dms Name: dms
Version: 0.6.2 Version: 1.0.2
Release: 1%{?dist} Release: 1%{?dist}
Summary: DankMaterialShell - Material 3 inspired shell for Wayland compositors Summary: DankMaterialShell - Material 3 inspired shell for Wayland compositors
@@ -105,6 +105,10 @@ pkill -USR1 -x dms >/dev/null 2>&1 || :
%{_datadir}/icons/hicolor/scalable/apps/danklogo.svg %{_datadir}/icons/hicolor/scalable/apps/danklogo.svg
%changelog %changelog
* Fri Dec 12 2025 AvengeMedia <maintainer@avengemedia.com> - 1.0.2-1
- Update to stable v1.0.2 release
- Bug fixes and improvements
* Fri Nov 22 2025 AvengeMedia <maintainer@avengemedia.com> - 0.6.2-1 * Fri Nov 22 2025 AvengeMedia <maintainer@avengemedia.com> - 0.6.2-1
- Stable release build with pre-built binaries - Stable release build with pre-built binaries
- Multi-arch support (x86_64, aarch64) - Multi-arch support (x86_64, aarch64)
+228
View File
@@ -0,0 +1,228 @@
#!/bin/bash
set -euo pipefail
# Build SRPM locally with correct tarball and upload to Copr
# Usage: ./create-upload-copr.sh VERSION [RELEASE]
# Example: ./create-upload-copr.sh 1.0.0 4
VERSION="${1:-1.0.0}"
RELEASE="${2:-1}"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
echo "Building DMS v${VERSION}-${RELEASE} SRPM for Copr..."
# Setup build directories
mkdir -p ~/rpmbuild/{BUILD,BUILDROOT,RPMS,SOURCES,SPECS,SRPMS}
cd ~/rpmbuild/SOURCES
# Create the corrected QML tarball locally
echo "Creating QML tarball with assets..."
TEMP_DIR=$(mktemp -d)
cd "$REPO_ROOT"
# Copy quickshell contents to temp
cp -r quickshell/* "$TEMP_DIR/"
# Copy root LICENSE and CONTRIBUTING.md
cp LICENSE CONTRIBUTING.md "$TEMP_DIR/"
# Copy root assets directory (this is what was missing!)
cp -r assets "$TEMP_DIR/"
# Create tarball
cd "$TEMP_DIR"
tar --exclude='.git' \
--exclude='.github' \
--exclude='*.tar.gz' \
-czf ~/rpmbuild/SOURCES/dms-qml.tar.gz .
cd ~/rpmbuild/SOURCES
echo "Created dms-qml.tar.gz with md5sum: $(md5sum dms-qml.tar.gz | awk '{print $1}')"
rm -rf "$TEMP_DIR"
# Generate spec file
echo "Generating spec file..."
CHANGELOG_DATE="$(date '+%a %b %d %Y')"
cat >~/rpmbuild/SPECS/dms.spec <<'SPECEOF'
# Spec for DMS stable releases - Built locally
%global debug_package %{nil}
%global version VERSION_PLACEHOLDER
%global pkg_summary DankMaterialShell - Material 3 inspired shell for Wayland compositors
Name: dms
Version: %{version}
Release: RELEASE_PLACEHOLDER%{?dist}
Summary: %{pkg_summary}
License: MIT
URL: https://github.com/AvengeMedia/DankMaterialShell
Source0: dms-qml.tar.gz
BuildRequires: gzip
BuildRequires: wget
BuildRequires: systemd-rpm-macros
Requires: (quickshell or quickshell-git)
Requires: accountsservice
Requires: dms-cli = %{version}-%{release}
Requires: dgop
Recommends: cava
Recommends: cliphist
Recommends: danksearch
Recommends: matugen
Recommends: wl-clipboard
Recommends: NetworkManager
Recommends: qt6-qtmultimedia
Suggests: qt6ct
%description
DankMaterialShell (DMS) is a modern Wayland desktop shell built with Quickshell
and optimized for the niri and hyprland compositors. Features notifications,
app launcher, wallpaper customization, and fully customizable with plugins.
Includes auto-theming for GTK/Qt apps with matugen, 20+ customizable widgets,
process monitoring, notification center, clipboard history, dock, control center,
lock screen, and comprehensive plugin system.
%package -n dms-cli
Summary: DankMaterialShell CLI tool
License: MIT
URL: https://github.com/AvengeMedia/DankMaterialShell
%description -n dms-cli
Command-line interface for DankMaterialShell configuration and management.
Provides native DBus bindings, NetworkManager integration, and system utilities.
%prep
%setup -q -c -n dms-qml
# Download architecture-specific binaries during build
case "%{_arch}" in
x86_64)
ARCH_SUFFIX="amd64"
;;
aarch64)
ARCH_SUFFIX="arm64"
;;
*)
echo "Unsupported architecture: %{_arch}"
exit 1
;;
esac
wget -O %{_builddir}/dms-cli.gz "https://github.com/AvengeMedia/DankMaterialShell/releases/download/v%{version}/dms-distropkg-${ARCH_SUFFIX}.gz" || {
echo "Failed to download dms-cli for architecture %{_arch}"
exit 1
}
gunzip -c %{_builddir}/dms-cli.gz > %{_builddir}/dms-cli
chmod +x %{_builddir}/dms-cli
%build
%install
install -Dm755 %{_builddir}/dms-cli %{buildroot}%{_bindir}/dms
install -d %{buildroot}%{_datadir}/bash-completion/completions
install -d %{buildroot}%{_datadir}/zsh/site-functions
install -d %{buildroot}%{_datadir}/fish/vendor_completions.d
%{_builddir}/dms-cli completion bash > %{buildroot}%{_datadir}/bash-completion/completions/dms || :
%{_builddir}/dms-cli completion zsh > %{buildroot}%{_datadir}/zsh/site-functions/_dms || :
%{_builddir}/dms-cli completion fish > %{buildroot}%{_datadir}/fish/vendor_completions.d/dms.fish || :
install -Dm644 assets/systemd/dms.service %{buildroot}%{_userunitdir}/dms.service
install -Dm644 assets/dms-open.desktop %{buildroot}%{_datadir}/applications/dms-open.desktop
install -Dm644 assets/danklogo.svg %{buildroot}%{_datadir}/icons/hicolor/scalable/apps/danklogo.svg
install -dm755 %{buildroot}%{_datadir}/quickshell/dms
cp -r %{_builddir}/dms-qml/* %{buildroot}%{_datadir}/quickshell/dms/
rm -rf %{buildroot}%{_datadir}/quickshell/dms/.git*
rm -f %{buildroot}%{_datadir}/quickshell/dms/.gitignore
rm -rf %{buildroot}%{_datadir}/quickshell/dms/.github
rm -rf %{buildroot}%{_datadir}/quickshell/dms/distro
echo "%{version}" > %{buildroot}%{_datadir}/quickshell/dms/VERSION
%posttrans
if [ -d "%{_sysconfdir}/xdg/quickshell/dms" ]; then
rmdir "%{_sysconfdir}/xdg/quickshell/dms" 2>/dev/null || true
rmdir "%{_sysconfdir}/xdg/quickshell" 2>/dev/null || true
rmdir "%{_sysconfdir}/xdg" 2>/dev/null || true
fi
# Signal running DMS instances to reload
pkill -USR1 -x dms >/dev/null 2>&1 || :
%files
%license LICENSE
%doc README.md CONTRIBUTING.md
%{_datadir}/quickshell/dms/
%{_userunitdir}/dms.service
%{_datadir}/applications/dms-open.desktop
%{_datadir}/icons/hicolor/scalable/apps/danklogo.svg
%files -n dms-cli
%{_bindir}/dms
%{_datadir}/bash-completion/completions/dms
%{_datadir}/zsh/site-functions/_dms
%{_datadir}/fish/vendor_completions.d/dms.fish
%changelog
* CHANGELOG_DATE_PLACEHOLDER AvengeMedia <contact@avengemedia.com> - VERSION_PLACEHOLDER-1
- Stable release VERSION_PLACEHOLDER
- Built locally with corrected tarball
SPECEOF
sed -i "s/VERSION_PLACEHOLDER/${VERSION}/g" ~/rpmbuild/SPECS/dms.spec
sed -i "s/RELEASE_PLACEHOLDER/${RELEASE}/g" ~/rpmbuild/SPECS/dms.spec
sed -i "s/CHANGELOG_DATE_PLACEHOLDER/${CHANGELOG_DATE}/g" ~/rpmbuild/SPECS/dms.spec
# Build SRPM
echo "Building SRPM..."
cd ~/rpmbuild/SPECS
rpmbuild -bs dms.spec
SRPM=$(ls ~/rpmbuild/SRPMS/dms-"${VERSION}"-*.src.rpm | tail -n 1)
if [ ! -f "$SRPM" ]; then
echo "Error: SRPM not found!"
exit 1
fi
echo "SRPM built successfully: $SRPM"
# Check if copr-cli is installed
if ! command -v copr-cli &>/dev/null; then
echo ""
echo "copr-cli is not installed. Install it with:"
echo " pip install copr-cli"
echo ""
echo "Then configure it with your Copr API token in ~/.config/copr"
echo ""
echo "SRPM is ready at: $SRPM"
echo "Upload manually with: copr-cli build avengemedia/dms $SRPM"
exit 0
fi
# Upload to Copr
echo ""
echo "Uploading to Copr..."
if copr-cli build avengemedia/dms "$SRPM" --nowait; then
echo ""
echo "Build submitted successfully! Check status at:"
echo "https://copr.fedorainfracloud.org/coprs/avengemedia/dms/builds/"
else
echo ""
echo "Copr upload failed. You can manually upload the SRPM:"
echo " copr-cli build avengemedia/dms $SRPM"
echo ""
echo "Or upload via web interface:"
echo " https://copr.fedorainfracloud.org/coprs/avengemedia/dms/builds/"
echo ""
echo "SRPM location: $SRPM"
exit 1
fi
+51 -51
View File
@@ -1,4 +1,4 @@
#!/bin/bash #!/usr/bin/env bash
# Unified OBS status checker for dms packages # Unified OBS status checker for dms packages
# Checks all platforms (Debian, OpenSUSE) and architectures (x86_64, aarch64) # Checks all platforms (Debian, OpenSUSE) and architectures (x86_64, aarch64)
# Only pulls logs if build failed # Only pulls logs if build failed
@@ -35,81 +35,81 @@ cd "$OBS_BASE" || {
for pkg in "${PACKAGES[@]}"; do for pkg in "${PACKAGES[@]}"; do
case "$pkg" in case "$pkg" in
dms) dms)
PROJECT="$OBS_BASE_PROJECT:dms" PROJECT="$OBS_BASE_PROJECT:dms"
;; ;;
dms-git) dms-git)
PROJECT="$OBS_BASE_PROJECT:dms-git" PROJECT="$OBS_BASE_PROJECT:dms-git"
;; ;;
*) *)
echo "Error: Unknown package '$pkg'" echo "Error: Unknown package '$pkg'"
continue continue
;; ;;
esac esac
(
echo "==========================================" echo "=========================================="
echo "=== $pkg ===" echo "=== $pkg ==="
echo "==========================================" echo "=========================================="
# Checkout if needed # Checkout if needed
if [[ ! -d "$PROJECT/$pkg" ]]; then if [[ ! -d "$PROJECT/$pkg" ]]; then
osc co "$PROJECT/$pkg" 2>&1 | tail -1 osc co "$PROJECT/$pkg" 2>&1 | tail -1
fi fi
cd "$PROJECT/$pkg" cd "$PROJECT/$pkg"
ALL_RESULTS=$(osc results 2>&1) ALL_RESULTS=$(osc results 2>&1)
# Check each repository and architecture # Check each repository and architecture
FAILED_BUILDS=() FAILED_BUILDS=()
for repo in "${REPOS[@]}"; do for repo in "${REPOS[@]}"; do
for arch in "${ARCHES[@]}"; do for arch in "${ARCHES[@]}"; do
STATUS=$(echo "$ALL_RESULTS" | grep "$repo.*$arch" | awk '{print $NF}' | head -1) STATUS=$(echo "$ALL_RESULTS" | grep "$repo.*$arch" | awk '{print $NF}' | head -1)
if [[ -n "$STATUS" ]]; then if [[ -n "$STATUS" ]]; then
# Color code status # Color code status
case "$STATUS" in case "$STATUS" in
succeeded) succeeded)
COLOR="\033[0;32m" # Green COLOR="\033[0;32m" # Green
SYMBOL="✅" SYMBOL="✅"
;; ;;
failed) failed)
COLOR="\033[0;31m" # Red COLOR="\033[0;31m" # Red
SYMBOL="❌" SYMBOL="❌"
FAILED_BUILDS+=("$repo $arch") FAILED_BUILDS+=("$repo $arch")
;; ;;
unresolvable) unresolvable)
COLOR="\033[0;33m" # Yellow COLOR="\033[0;33m" # Yellow
SYMBOL="⚠️" SYMBOL="⚠️"
;; ;;
*) *)
COLOR="\033[0;37m" # White COLOR="\033[0;37m" # White
SYMBOL="⏳" SYMBOL="⏳"
;; ;;
esac esac
echo -e " $SYMBOL $repo $arch: ${COLOR}$STATUS\033[0m" echo -e " $SYMBOL $repo $arch: ${COLOR}$STATUS\033[0m"
fi fi
done
done done
done
# Pull logs for failed builds # Pull logs for failed builds
if [[ ${#FAILED_BUILDS[@]} -gt 0 ]]; then if [[ ${#FAILED_BUILDS[@]} -gt 0 ]]; then
echo ""
echo " 📋 Fetching logs for failed builds..."
for build in "${FAILED_BUILDS[@]}"; do
read -r repo arch <<< "$build"
echo "" echo ""
echo " ────────────────────────────────────────────" echo " 📋 Fetching logs for failed builds..."
echo " Build log: $repo $arch" for build in "${FAILED_BUILDS[@]}"; do
echo " ────────────────────────────────────────────" read -r repo arch <<<"$build"
osc remotebuildlog "$PROJECT" "$pkg" "$repo" "$arch" 2>&1 | tail -100 echo ""
done echo " ────────────────────────────────────────────"
fi echo " Build log: $repo $arch"
echo " ────────────────────────────────────────────"
osc remotebuildlog "$PROJECT" "$pkg" "$repo" "$arch" 2>&1 | tail -100
done
fi
echo "" echo ""
cd - > /dev/null )
done done
echo "==========================================" echo "=========================================="
echo "Status check complete!" echo "Status check complete!"
+180 -122
View File
@@ -17,21 +17,21 @@ MESSAGE=""
for arg in "$@"; do for arg in "$@"; do
case "$arg" in case "$arg" in
debian) debian)
UPLOAD_DEBIAN=true UPLOAD_DEBIAN=true
UPLOAD_OPENSUSE=false UPLOAD_OPENSUSE=false
;; ;;
opensuse) opensuse)
UPLOAD_DEBIAN=false UPLOAD_DEBIAN=false
UPLOAD_OPENSUSE=true UPLOAD_OPENSUSE=true
;; ;;
*) *)
if [[ -z "$PACKAGE" ]]; then if [[ -z "$PACKAGE" ]]; then
PACKAGE="$arg" PACKAGE="$arg"
elif [[ -z "$MESSAGE" ]]; then elif [[ -z "$MESSAGE" ]]; then
MESSAGE="$arg" MESSAGE="$arg"
fi fi
;; ;;
esac esac
done done
@@ -46,12 +46,12 @@ if [[ -z "$PACKAGE" ]]; then
echo " 2. dms-git - Nightly DMS" echo " 2. dms-git - Nightly DMS"
echo " a. all" echo " a. all"
echo "" echo ""
read -p "Select package (1-${#AVAILABLE_PACKAGES[@]}, a): " selection read -r -p "Select package (1-${#AVAILABLE_PACKAGES[@]}, a): " selection
if [[ "$selection" == "a" ]] || [[ "$selection" == "all" ]]; then if [[ "$selection" == "a" ]] || [[ "$selection" == "all" ]]; then
PACKAGE="all" PACKAGE="all"
elif [[ "$selection" =~ ^[0-9]+$ ]] && [[ "$selection" -ge 1 ]] && [[ "$selection" -le ${#AVAILABLE_PACKAGES[@]} ]]; then elif [[ "$selection" =~ ^[0-9]+$ ]] && [[ "$selection" -ge 1 ]] && [[ "$selection" -le ${#AVAILABLE_PACKAGES[@]} ]]; then
PACKAGE="${AVAILABLE_PACKAGES[$((selection-1))]}" PACKAGE="${AVAILABLE_PACKAGES[$((selection - 1))]}"
else else
echo "Error: Invalid selection" echo "Error: Invalid selection"
exit 1 exit 1
@@ -124,16 +124,16 @@ if [[ ! -d "distro/debian/$PACKAGE" ]]; then
fi fi
case "$PACKAGE" in case "$PACKAGE" in
dms) dms)
PROJECT="dms" PROJECT="dms"
;; ;;
dms-git) dms-git)
PROJECT="dms-git" PROJECT="dms-git"
;; ;;
*) *)
echo "Error: Unknown package '$PACKAGE'" echo "Error: Unknown package '$PACKAGE'"
exit 1 exit 1
;; ;;
esac esac
OBS_PROJECT="${OBS_BASE_PROJECT}:${PROJECT}" OBS_PROJECT="${OBS_BASE_PROJECT}:${PROJECT}"
@@ -216,8 +216,8 @@ if [[ "$UPLOAD_OPENSUSE" == true ]] && [[ -f "distro/opensuse/$PACKAGE.spec" ]];
# However, we need to check if we are also updating Debian, or if this script is expected to continue. # However, we need to check if we are also updating Debian, or if this script is expected to continue.
# If this is OpenSUSE only run, we can exit. # If this is OpenSUSE only run, we can exit.
if [[ "$UPLOAD_DEBIAN" == false ]]; then if [[ "$UPLOAD_DEBIAN" == false ]]; then
echo "✅ No changes needed for OpenSUSE (not manual). Exiting." echo "✅ No changes needed for OpenSUSE (not manual). Exiting."
exit 0 exit 0
fi fi
fi fi
fi fi
@@ -235,7 +235,7 @@ if [[ "$UPLOAD_OPENSUSE" == true ]] && [[ "$UPLOAD_DEBIAN" == false ]] && [[ -f
echo " - OpenSUSE-only upload: creating source tarball" echo " - OpenSUSE-only upload: creating source tarball"
TEMP_DIR=$(mktemp -d) TEMP_DIR=$(mktemp -d)
trap "rm -rf $TEMP_DIR" EXIT trap 'rm -rf $TEMP_DIR' EXIT
if [[ -f "distro/debian/$PACKAGE/_service" ]] && grep -q "tar_scm" "distro/debian/$PACKAGE/_service"; then if [[ -f "distro/debian/$PACKAGE/_service" ]] && grep -q "tar_scm" "distro/debian/$PACKAGE/_service"; then
GIT_URL=$(grep -A 5 'name="tar_scm"' "distro/debian/$PACKAGE/_service" | grep "url" | sed 's/.*<param name="url">\(.*\)<\/param>.*/\1/') GIT_URL=$(grep -A 5 'name="tar_scm"' "distro/debian/$PACKAGE/_service" | grep "url" | sed 's/.*<param name="url">\(.*\)<\/param>.*/\1/')
@@ -244,8 +244,8 @@ if [[ "$UPLOAD_OPENSUSE" == true ]] && [[ "$UPLOAD_DEBIAN" == false ]] && [[ -f
if [[ -n "$GIT_URL" ]]; then if [[ -n "$GIT_URL" ]]; then
echo " Cloning git source from: $GIT_URL (revision: ${GIT_REVISION:-master})" echo " Cloning git source from: $GIT_URL (revision: ${GIT_REVISION:-master})"
SOURCE_DIR="$TEMP_DIR/dms-git-source" SOURCE_DIR="$TEMP_DIR/dms-git-source"
if git clone --depth 1 --branch "${GIT_REVISION:-master}" "$GIT_URL" "$SOURCE_DIR" 2>/dev/null || \ if git clone --depth 1 --branch "${GIT_REVISION:-master}" "$GIT_URL" "$SOURCE_DIR" 2>/dev/null ||
git clone --depth 1 "$GIT_URL" "$SOURCE_DIR" 2>/dev/null; then git clone --depth 1 "$GIT_URL" "$SOURCE_DIR" 2>/dev/null; then
cd "$SOURCE_DIR" cd "$SOURCE_DIR"
if [[ -n "$GIT_REVISION" ]]; then if [[ -n "$GIT_REVISION" ]]; then
git checkout "$GIT_REVISION" 2>/dev/null || true git checkout "$GIT_REVISION" 2>/dev/null || true
@@ -265,16 +265,16 @@ if [[ "$UPLOAD_OPENSUSE" == true ]] && [[ "$UPLOAD_DEBIAN" == false ]] && [[ -f
cd "$OBS_TARBALL_DIR" cd "$OBS_TARBALL_DIR"
case "$PACKAGE" in case "$PACKAGE" in
dms) dms)
DMS_VERSION=$(grep "^Version:" "$REPO_ROOT/distro/opensuse/$PACKAGE.spec" | sed 's/^Version:[[:space:]]*//' | head -1) DMS_VERSION=$(grep "^Version:" "$REPO_ROOT/distro/opensuse/$PACKAGE.spec" | sed 's/^Version:[[:space:]]*//' | head -1)
EXPECTED_DIR="DankMaterialShell-${DMS_VERSION}" EXPECTED_DIR="DankMaterialShell-${DMS_VERSION}"
;; ;;
dms-git) dms-git)
EXPECTED_DIR="dms-git-source" EXPECTED_DIR="dms-git-source"
;; ;;
*) *)
EXPECTED_DIR=$(basename "$SOURCE_DIR") EXPECTED_DIR=$(basename "$SOURCE_DIR")
;; ;;
esac esac
echo " Creating $SOURCE0 (directory: $EXPECTED_DIR)" echo " Creating $SOURCE0 (directory: $EXPECTED_DIR)"
@@ -295,7 +295,7 @@ fi
if [[ "$UPLOAD_DEBIAN" == true ]] && [[ -d "distro/debian/$PACKAGE/debian" ]]; then if [[ "$UPLOAD_DEBIAN" == true ]] && [[ -d "distro/debian/$PACKAGE/debian" ]]; then
# Use CHANGELOG_VERSION already set above, or get it if not set # Use CHANGELOG_VERSION already set above, or get it if not set
if [[ -z "$CHANGELOG_VERSION" ]]; then if [[ -z "$CHANGELOG_VERSION" ]]; then
CHANGELOG_VERSION=$(grep -m1 "^$PACKAGE" distro/debian/$PACKAGE/debian/changelog 2>/dev/null | sed 's/.*(\([^)]*\)).*/\1/' || echo "0.1.11") CHANGELOG_VERSION=$(grep -m1 "^$PACKAGE" distro/debian/"$PACKAGE"/debian/changelog 2>/dev/null | sed 's/.*(\([^)]*\)).*/\1/' || echo "0.1.11")
fi fi
# Determine source format # Determine source format
@@ -314,7 +314,7 @@ if [[ "$UPLOAD_DEBIAN" == true ]] && [[ -d "distro/debian/$PACKAGE/debian" ]]; t
VERSION="$CHANGELOG_VERSION" VERSION="$CHANGELOG_VERSION"
TEMP_DIR=$(mktemp -d) TEMP_DIR=$(mktemp -d)
trap "rm -rf $TEMP_DIR" EXIT trap 'rm -rf $TEMP_DIR' EXIT
COMBINED_TARBALL="${PACKAGE}_${VERSION}.tar.gz" COMBINED_TARBALL="${PACKAGE}_${VERSION}.tar.gz"
SOURCE_DIR="" SOURCE_DIR=""
@@ -326,8 +326,8 @@ if [[ "$UPLOAD_DEBIAN" == true ]] && [[ -d "distro/debian/$PACKAGE/debian" ]]; t
if [[ -n "$GIT_URL" ]]; then if [[ -n "$GIT_URL" ]]; then
echo " Cloning git source from: $GIT_URL (revision: ${GIT_REVISION:-master})" echo " Cloning git source from: $GIT_URL (revision: ${GIT_REVISION:-master})"
SOURCE_DIR="$TEMP_DIR/dms-git-source" SOURCE_DIR="$TEMP_DIR/dms-git-source"
if git clone --depth 1 --branch "${GIT_REVISION:-master}" "$GIT_URL" "$SOURCE_DIR" 2>/dev/null || \ if git clone --depth 1 --branch "${GIT_REVISION:-master}" "$GIT_URL" "$SOURCE_DIR" 2>/dev/null ||
git clone --depth 1 "$GIT_URL" "$SOURCE_DIR" 2>/dev/null; then git clone --depth 1 "$GIT_URL" "$SOURCE_DIR" 2>/dev/null; then
cd "$SOURCE_DIR" cd "$SOURCE_DIR"
if [[ -n "$GIT_REVISION" ]]; then if [[ -n "$GIT_REVISION" ]]; then
git checkout "$GIT_REVISION" 2>/dev/null || true git checkout "$GIT_REVISION" 2>/dev/null || true
@@ -341,14 +341,14 @@ if [[ "$UPLOAD_DEBIAN" == true ]] && [[ -d "distro/debian/$PACKAGE/debian" ]]; t
fi fi
fi fi
elif grep -q "download_url" "distro/debian/$PACKAGE/_service" && [[ "$PACKAGE" != "dms-git" ]]; then elif grep -q "download_url" "distro/debian/$PACKAGE/_service" && [[ "$PACKAGE" != "dms-git" ]]; then
ALL_PATHS=$(grep -A 5 '<service name="download_url">' "distro/debian/$PACKAGE/_service" | \ ALL_PATHS=$(grep -A 5 '<service name="download_url">' "distro/debian/$PACKAGE/_service" |
grep '<param name="path">' | \ grep '<param name="path">' |
sed 's/.*<param name="path">\(.*\)<\/param>.*/\1/') sed 's/.*<param name="path">\(.*\)<\/param>.*/\1/')
SOURCE_PATH="" SOURCE_PATH=""
for path in $ALL_PATHS; do for path in $ALL_PATHS; do
if echo "$path" | grep -qE "(source|archive|\.tar\.(gz|xz|bz2))" && \ if echo "$path" | grep -qE "(source|archive|\.tar\.(gz|xz|bz2))" &&
! echo "$path" | grep -qE "(distropkg|binary)"; then ! echo "$path" | grep -qE "(distropkg|binary)"; then
SOURCE_PATH="$path" SOURCE_PATH="$path"
break break
fi fi
@@ -385,8 +385,8 @@ if [[ "$UPLOAD_DEBIAN" == true ]] && [[ -d "distro/debian/$PACKAGE/debian" ]]; t
SOURCE_URL="${URL_PROTOCOL}://${URL_HOST}${URL_PATH}" SOURCE_URL="${URL_PROTOCOL}://${URL_HOST}${URL_PATH}"
echo " Downloading source from: $SOURCE_URL" echo " Downloading source from: $SOURCE_URL"
if wget -q -O "$TEMP_DIR/source-archive" "$SOURCE_URL" 2>/dev/null || \ if wget -q -O "$TEMP_DIR/source-archive" "$SOURCE_URL" 2>/dev/null ||
curl -L -f -s -o "$TEMP_DIR/source-archive" "$SOURCE_URL" 2>/dev/null; then curl -L -f -s -o "$TEMP_DIR/source-archive" "$SOURCE_URL" 2>/dev/null; then
cd "$TEMP_DIR" cd "$TEMP_DIR"
if [[ "$SOURCE_URL" == *.tar.xz ]]; then if [[ "$SOURCE_URL" == *.tar.xz ]]; then
tar -xJf source-archive tar -xJf source-archive
@@ -432,7 +432,7 @@ if [[ "$UPLOAD_DEBIAN" == true ]] && [[ -d "distro/debian/$PACKAGE/debian" ]]; t
echo " - Vendoring Go dependencies for offline OBS build..." echo " - Vendoring Go dependencies for offline OBS build..."
cd "$SOURCE_DIR/core" cd "$SOURCE_DIR/core"
if ! command -v go &> /dev/null; then if ! command -v go &>/dev/null; then
echo "ERROR: Go not found. Install Go to vendor dependencies." echo "ERROR: Go not found. Install Go to vendor dependencies."
echo " Install: sudo apt-get install golang-go (Debian/Ubuntu)" echo " Install: sudo apt-get install golang-go (Debian/Ubuntu)"
echo " or: sudo dnf install golang (Fedora)" echo " or: sudo dnf install golang (Fedora)"
@@ -465,53 +465,53 @@ if [[ "$UPLOAD_DEBIAN" == true ]] && [[ -d "distro/debian/$PACKAGE/debian" ]]; t
cd "$OBS_TARBALL_DIR" cd "$OBS_TARBALL_DIR"
case "$PACKAGE" in case "$PACKAGE" in
dms) dms)
if [[ -n "$CHANGELOG_VERSION" ]]; then if [[ -n "$CHANGELOG_VERSION" ]]; then
DMS_VERSION="$CHANGELOG_VERSION" DMS_VERSION="$CHANGELOG_VERSION"
else else
DMS_VERSION=$(grep "^Version:" "$REPO_ROOT/distro/opensuse/$PACKAGE.spec" | sed 's/^Version:[[:space:]]*//' | head -1) DMS_VERSION=$(grep "^Version:" "$REPO_ROOT/distro/opensuse/$PACKAGE.spec" | sed 's/^Version:[[:space:]]*//' | head -1)
fi fi
EXPECTED_DIR="DankMaterialShell-${DMS_VERSION}" EXPECTED_DIR="DankMaterialShell-${DMS_VERSION}"
echo " Creating $SOURCE0 (directory: $EXPECTED_DIR)" echo " Creating $SOURCE0 (directory: $EXPECTED_DIR)"
cp -r "$SOURCE_DIR" "$EXPECTED_DIR" cp -r "$SOURCE_DIR" "$EXPECTED_DIR"
if [[ "$SOURCE0" == *.tar.xz ]]; then if [[ "$SOURCE0" == *.tar.xz ]]; then
tar --sort=name --mtime='2000-01-01 00:00:00' --owner=0 --group=0 -cJf "$WORK_DIR/$SOURCE0" "$EXPECTED_DIR" tar --sort=name --mtime='2000-01-01 00:00:00' --owner=0 --group=0 -cJf "$WORK_DIR/$SOURCE0" "$EXPECTED_DIR"
elif [[ "$SOURCE0" == *.tar.bz2 ]]; then elif [[ "$SOURCE0" == *.tar.bz2 ]]; then
tar --sort=name --mtime='2000-01-01 00:00:00' --owner=0 --group=0 -cjf "$WORK_DIR/$SOURCE0" "$EXPECTED_DIR" tar --sort=name --mtime='2000-01-01 00:00:00' --owner=0 --group=0 -cjf "$WORK_DIR/$SOURCE0" "$EXPECTED_DIR"
else else
tar --sort=name --mtime='2000-01-01 00:00:00' --owner=0 --group=0 -czf "$WORK_DIR/$SOURCE0" "$EXPECTED_DIR" tar --sort=name --mtime='2000-01-01 00:00:00' --owner=0 --group=0 -czf "$WORK_DIR/$SOURCE0" "$EXPECTED_DIR"
fi fi
rm -rf "$EXPECTED_DIR" rm -rf "$EXPECTED_DIR"
echo " Created $SOURCE0 ($(stat -c%s "$WORK_DIR/$SOURCE0" 2>/dev/null || echo 0) bytes)" echo " Created $SOURCE0 ($(stat -c%s "$WORK_DIR/$SOURCE0" 2>/dev/null || echo 0) bytes)"
;; ;;
dms-git) dms-git)
EXPECTED_DIR="dms-git-source" EXPECTED_DIR="dms-git-source"
echo " Creating $SOURCE0 (directory: $EXPECTED_DIR)" echo " Creating $SOURCE0 (directory: $EXPECTED_DIR)"
cp -r "$SOURCE_DIR" "$EXPECTED_DIR" cp -r "$SOURCE_DIR" "$EXPECTED_DIR"
if [[ "$SOURCE0" == *.tar.xz ]]; then if [[ "$SOURCE0" == *.tar.xz ]]; then
tar --sort=name --mtime='2000-01-01 00:00:00' --owner=0 --group=0 -cJf "$WORK_DIR/$SOURCE0" "$EXPECTED_DIR" tar --sort=name --mtime='2000-01-01 00:00:00' --owner=0 --group=0 -cJf "$WORK_DIR/$SOURCE0" "$EXPECTED_DIR"
elif [[ "$SOURCE0" == *.tar.bz2 ]]; then elif [[ "$SOURCE0" == *.tar.bz2 ]]; then
tar --sort=name --mtime='2000-01-01 00:00:00' --owner=0 --group=0 -cjf "$WORK_DIR/$SOURCE0" "$EXPECTED_DIR" tar --sort=name --mtime='2000-01-01 00:00:00' --owner=0 --group=0 -cjf "$WORK_DIR/$SOURCE0" "$EXPECTED_DIR"
else else
tar --sort=name --mtime='2000-01-01 00:00:00' --owner=0 --group=0 -czf "$WORK_DIR/$SOURCE0" "$EXPECTED_DIR" tar --sort=name --mtime='2000-01-01 00:00:00' --owner=0 --group=0 -czf "$WORK_DIR/$SOURCE0" "$EXPECTED_DIR"
fi fi
rm -rf "$EXPECTED_DIR" rm -rf "$EXPECTED_DIR"
echo " Created $SOURCE0 ($(stat -c%s "$WORK_DIR/$SOURCE0" 2>/dev/null || echo 0) bytes)" echo " Created $SOURCE0 ($(stat -c%s "$WORK_DIR/$SOURCE0" 2>/dev/null || echo 0) bytes)"
;; ;;
*) *)
DIR_NAME=$(basename "$SOURCE_DIR") DIR_NAME=$(basename "$SOURCE_DIR")
echo " Creating $SOURCE0 (directory: $DIR_NAME)" echo " Creating $SOURCE0 (directory: $DIR_NAME)"
cp -r "$SOURCE_DIR" "$DIR_NAME" cp -r "$SOURCE_DIR" "$DIR_NAME"
if [[ "$SOURCE0" == *.tar.xz ]]; then if [[ "$SOURCE0" == *.tar.xz ]]; then
tar --sort=name --mtime='2000-01-01 00:00:00' --owner=0 --group=0 -cJf "$WORK_DIR/$SOURCE0" "$DIR_NAME" tar --sort=name --mtime='2000-01-01 00:00:00' --owner=0 --group=0 -cJf "$WORK_DIR/$SOURCE0" "$DIR_NAME"
elif [[ "$SOURCE0" == *.tar.bz2 ]]; then elif [[ "$SOURCE0" == *.tar.bz2 ]]; then
tar --sort=name --mtime='2000-01-01 00:00:00' --owner=0 --group=0 -cjf "$WORK_DIR/$SOURCE0" "$DIR_NAME" tar --sort=name --mtime='2000-01-01 00:00:00' --owner=0 --group=0 -cjf "$WORK_DIR/$SOURCE0" "$DIR_NAME"
else else
tar --sort=name --mtime='2000-01-01 00:00:00' --owner=0 --group=0 -czf "$WORK_DIR/$SOURCE0" "$DIR_NAME" tar --sort=name --mtime='2000-01-01 00:00:00' --owner=0 --group=0 -czf "$WORK_DIR/$SOURCE0" "$DIR_NAME"
fi fi
rm -rf "$DIR_NAME" rm -rf "$DIR_NAME"
echo " Created $SOURCE0 ($(stat -c%s "$WORK_DIR/$SOURCE0" 2>/dev/null || echo 0) bytes)" echo " Created $SOURCE0 ($(stat -c%s "$WORK_DIR/$SOURCE0" 2>/dev/null || echo 0) bytes)"
;; ;;
esac esac
cd "$REPO_ROOT" cd "$REPO_ROOT"
rm -rf "$OBS_TARBALL_DIR" rm -rf "$OBS_TARBALL_DIR"
@@ -592,7 +592,7 @@ if [[ "$UPLOAD_DEBIAN" == true ]] && [[ -d "distro/debian/$PACKAGE/debian" ]]; t
BUILD_DEPS="debhelper-compat (= 13)" BUILD_DEPS="debhelper-compat (= 13)"
fi fi
cat > "$WORK_DIR/$PACKAGE.dsc" << EOF cat >"$WORK_DIR/$PACKAGE.dsc" <<EOF
Format: 3.0 (native) Format: 3.0 (native)
Source: $PACKAGE Source: $PACKAGE
Binary: $PACKAGE Binary: $PACKAGE
@@ -618,7 +618,7 @@ EOF
tar -czf "$WORK_DIR/debian.tar.gz" -C "distro/debian/$PACKAGE" debian/ tar -czf "$WORK_DIR/debian.tar.gz" -C "distro/debian/$PACKAGE" debian/
echo " - Generating $PACKAGE.dsc for quilt format" echo " - Generating $PACKAGE.dsc for quilt format"
cat > "$WORK_DIR/$PACKAGE.dsc" << EOF cat >"$WORK_DIR/$PACKAGE.dsc" <<EOF
Format: 3.0 (quilt) Format: 3.0 (quilt)
Source: $PACKAGE Source: $PACKAGE
Binary: $PACKAGE Binary: $PACKAGE
@@ -672,7 +672,20 @@ if [[ "$UPLOAD_DEBIAN" == true ]] && [[ "$SOURCE_FORMAT" == *"native"* ]] && [[
if [[ "$IS_MANUAL" == true ]]; then if [[ "$IS_MANUAL" == true ]]; then
echo "==> Detected rebuild of same base version $CHANGELOG_BASE, incrementing version" echo "==> Detected rebuild of same base version $CHANGELOG_BASE, incrementing version"
if [[ "$CHANGELOG_VERSION" =~ ^([0-9.]+)\+git$ ]]; then # If REBUILD_RELEASE is set, use that number directly
if [[ -n "${REBUILD_RELEASE:-}" ]]; then
if [[ "$CHANGELOG_VERSION" =~ ^([0-9.]+)\+git([0-9]+)(\.[a-f0-9]+)?$ ]]; then
BASE_VERSION="${BASH_REMATCH[1]}"
GIT_NUM="${BASH_REMATCH[2]}"
GIT_HASH="${BASH_REMATCH[3]}"
NEW_VERSION="${BASE_VERSION}+git${GIT_NUM}${GIT_HASH}ppa${REBUILD_RELEASE}"
echo " Using REBUILD_RELEASE=$REBUILD_RELEASE: $CHANGELOG_VERSION -> $NEW_VERSION"
else
BASE_VERSION=$(echo "$CHANGELOG_VERSION" | sed 's/ppa[0-9]*$//')
NEW_VERSION="${BASE_VERSION}ppa${REBUILD_RELEASE}"
echo " Using REBUILD_RELEASE=$REBUILD_RELEASE: $CHANGELOG_VERSION -> $NEW_VERSION"
fi
elif [[ "$CHANGELOG_VERSION" =~ ^([0-9.]+)\+git$ ]]; then
BASE_VERSION="${BASH_REMATCH[1]}" BASE_VERSION="${BASH_REMATCH[1]}"
NEW_VERSION="${BASE_VERSION}+gitppa1" NEW_VERSION="${BASE_VERSION}+gitppa1"
echo " Adding PPA number: $CHANGELOG_VERSION -> $NEW_VERSION" echo " Adding PPA number: $CHANGELOG_VERSION -> $NEW_VERSION"
@@ -704,11 +717,27 @@ if [[ "$UPLOAD_DEBIAN" == true ]] && [[ "$SOURCE_FORMAT" == *"native"* ]] && [[
fi fi
elif [[ "$CHANGELOG_VERSION" =~ ^([0-9.]+)(-([0-9]+))?$ ]]; then elif [[ "$CHANGELOG_VERSION" =~ ^([0-9.]+)(-([0-9]+))?$ ]]; then
BASE_VERSION="${BASH_REMATCH[1]}" BASE_VERSION="${BASH_REMATCH[1]}"
NEW_VERSION="${BASE_VERSION}ppa1" # Check if old DSC has ppa suffix even if changelog doesn't
echo " Warning: Native format cannot have Debian revision, converting to PPA format: $CHANGELOG_VERSION -> $NEW_VERSION" if [[ "$OLD_DSC_VERSION" =~ ppa([0-9]+)$ ]]; then
OLD_PPA_NUM="${BASH_REMATCH[1]}"
NEW_PPA_NUM=$((OLD_PPA_NUM + 1))
NEW_VERSION="${BASE_VERSION}ppa${NEW_PPA_NUM}"
echo " Incrementing PPA number from old DSC: $OLD_DSC_VERSION -> $NEW_VERSION"
else
NEW_VERSION="${BASE_VERSION}ppa1"
echo " Adding PPA number: $CHANGELOG_VERSION -> $NEW_VERSION"
fi
else else
NEW_VERSION="${CHANGELOG_VERSION}ppa1" # Check if old DSC has ppa suffix for unknown formats
echo " Warning: Could not parse version format, appending ppa1: $CHANGELOG_VERSION -> $NEW_VERSION" if [[ "$OLD_DSC_VERSION" =~ ppa([0-9]+)$ ]]; then
OLD_PPA_NUM="${BASH_REMATCH[1]}"
NEW_PPA_NUM=$((OLD_PPA_NUM + 1))
NEW_VERSION="${CHANGELOG_VERSION}ppa${NEW_PPA_NUM}"
echo " Incrementing PPA number from old DSC: $OLD_DSC_VERSION -> $NEW_VERSION"
else
NEW_VERSION="${CHANGELOG_VERSION}ppa1"
echo " Warning: Could not parse version format, appending ppa1: $CHANGELOG_VERSION -> $NEW_VERSION"
fi
fi fi
if [[ -z "$SOURCE_DIR" ]] || [[ ! -d "$SOURCE_DIR" ]] || [[ ! -d "$SOURCE_DIR/debian" ]]; then if [[ -z "$SOURCE_DIR" ]] || [[ ! -d "$SOURCE_DIR" ]] || [[ ! -d "$SOURCE_DIR/debian" ]]; then
@@ -734,10 +763,10 @@ if [[ "$UPLOAD_DEBIAN" == true ]] && [[ "$SOURCE_FORMAT" == *"native"* ]] && [[
if [[ -f "$REPO_CHANGELOG" ]]; then if [[ -f "$REPO_CHANGELOG" ]]; then
OLD_ENTRY_START=$(grep -n "^$PACKAGE (" "$REPO_CHANGELOG" | sed -n '2p' | cut -d: -f1) OLD_ENTRY_START=$(grep -n "^$PACKAGE (" "$REPO_CHANGELOG" | sed -n '2p' | cut -d: -f1)
if [[ -n "$OLD_ENTRY_START" ]]; then if [[ -n "$OLD_ENTRY_START" ]]; then
tail -n +$OLD_ENTRY_START "$REPO_CHANGELOG" tail -n +"$OLD_ENTRY_START" "$REPO_CHANGELOG"
fi fi
fi fi
} > "$TEMP_CHANGELOG" } >"$TEMP_CHANGELOG"
cp "$TEMP_CHANGELOG" "$SOURCE_CHANGELOG" cp "$TEMP_CHANGELOG" "$SOURCE_CHANGELOG"
rm -f "$TEMP_CHANGELOG" rm -f "$TEMP_CHANGELOG"
@@ -763,7 +792,15 @@ if [[ "$UPLOAD_DEBIAN" == true ]] && [[ "$SOURCE_FORMAT" == *"native"* ]] && [[
echo " Renaming $EXTRACTED to $EXPECTED_SOURCE_DIR" echo " Renaming $EXTRACTED to $EXPECTED_SOURCE_DIR"
mv "$EXTRACTED" "$EXPECTED_SOURCE_DIR" mv "$EXTRACTED" "$EXPECTED_SOURCE_DIR"
rm -f "$WORK_DIR/dms-source.tar.gz" rm -f "$WORK_DIR/dms-source.tar.gz"
tar --sort=name --mtime='2000-01-01 00:00:00' --owner=0 --group=0 -czf "$WORK_DIR/dms-source.tar.gz" "$EXPECTED_SOURCE_DIR" if ! tar --sort=name --mtime='2000-01-01 00:00:00' --owner=0 --group=0 -czf "$WORK_DIR/dms-source.tar.gz" "$EXPECTED_SOURCE_DIR"; then
echo " Error: Failed to create dms-source.tar.gz"
ls -lah "$EXPECTED_SOURCE_DIR" | head -20
exit 1
fi
if [[ ! -f "$WORK_DIR/dms-source.tar.gz" ]]; then
echo " Error: dms-source.tar.gz was not created"
exit 1
fi
ROOT_DIR=$(tar -tf "$WORK_DIR/dms-source.tar.gz" | head -1 | cut -d/ -f1) ROOT_DIR=$(tar -tf "$WORK_DIR/dms-source.tar.gz" | head -1 | cut -d/ -f1)
if [[ "$ROOT_DIR" != "$EXPECTED_SOURCE_DIR" ]]; then if [[ "$ROOT_DIR" != "$EXPECTED_SOURCE_DIR" ]]; then
echo " Error: Recreated tarball has wrong root directory: $ROOT_DIR (expected $EXPECTED_SOURCE_DIR)" echo " Error: Recreated tarball has wrong root directory: $ROOT_DIR (expected $EXPECTED_SOURCE_DIR)"
@@ -811,10 +848,10 @@ if [[ "$UPLOAD_DEBIAN" == true ]] && [[ "$SOURCE_FORMAT" == *"native"* ]] && [[
if [[ -f "$REPO_CHANGELOG" ]]; then if [[ -f "$REPO_CHANGELOG" ]]; then
OLD_ENTRY_START=$(grep -n "^$PACKAGE (" "$REPO_CHANGELOG" | sed -n '2p' | cut -d: -f1) OLD_ENTRY_START=$(grep -n "^$PACKAGE (" "$REPO_CHANGELOG" | sed -n '2p' | cut -d: -f1)
if [[ -n "$OLD_ENTRY_START" ]]; then if [[ -n "$OLD_ENTRY_START" ]]; then
tail -n +$OLD_ENTRY_START "$REPO_CHANGELOG" tail -n +"$OLD_ENTRY_START" "$REPO_CHANGELOG"
fi fi
fi fi
} > "$TEMP_CHANGELOG" } >"$TEMP_CHANGELOG"
cp "$TEMP_CHANGELOG" "$EXPECTED_DIR/debian/changelog" cp "$TEMP_CHANGELOG" "$EXPECTED_DIR/debian/changelog"
rm -f "$TEMP_CHANGELOG" rm -f "$TEMP_CHANGELOG"
fi fi
@@ -839,11 +876,7 @@ if [[ "$UPLOAD_DEBIAN" == true ]] && [[ "$SOURCE_FORMAT" == *"native"* ]] && [[
tar --sort=name --mtime='2000-01-01 00:00:00' --owner=0 --group=0 -czf "$WORK_DIR/$COMBINED_TARBALL" "$TARBALL_BASE" tar --sort=name --mtime='2000-01-01 00:00:00' --owner=0 --group=0 -czf "$WORK_DIR/$COMBINED_TARBALL" "$TARBALL_BASE"
cd "$REPO_ROOT" cd "$REPO_ROOT"
fi fi
else
echo "==> Detected same version. Not a manual run, skipping Debian version increment."
echo "✅ No changes needed for Debian. Exiting."
exit 0
fi
TARBALL_SIZE=$(stat -c%s "$WORK_DIR/$COMBINED_TARBALL" 2>/dev/null || stat -f%z "$WORK_DIR/$COMBINED_TARBALL" 2>/dev/null) TARBALL_SIZE=$(stat -c%s "$WORK_DIR/$COMBINED_TARBALL" 2>/dev/null || stat -f%z "$WORK_DIR/$COMBINED_TARBALL" 2>/dev/null)
TARBALL_MD5=$(md5sum "$WORK_DIR/$COMBINED_TARBALL" | cut -d' ' -f1) TARBALL_MD5=$(md5sum "$WORK_DIR/$COMBINED_TARBALL" | cut -d' ' -f1)
@@ -872,7 +905,7 @@ if [[ "$UPLOAD_DEBIAN" == true ]] && [[ "$SOURCE_FORMAT" == *"native"* ]] && [[
BUILD_DEPS="debhelper-compat (= 13)" BUILD_DEPS="debhelper-compat (= 13)"
fi fi
cat > "$WORK_DIR/$PACKAGE.dsc" << EOF cat >"$WORK_DIR/$PACKAGE.dsc" <<EOF
Format: 3.0 (native) Format: 3.0 (native)
Source: $PACKAGE Source: $PACKAGE
Binary: $PACKAGE Binary: $PACKAGE
@@ -884,23 +917,48 @@ Files:
$TARBALL_MD5 $TARBALL_SIZE $COMBINED_TARBALL $TARBALL_MD5 $TARBALL_SIZE $COMBINED_TARBALL
EOF EOF
echo " - Updated changelog and recreated tarball with version $NEW_VERSION" echo " - Updated changelog and recreated tarball with version $NEW_VERSION"
else
echo "==> Detected same version. Not a manual run, skipping Debian version increment."
echo "✅ No changes needed for Debian. Exiting."
exit 0
fi
fi fi
fi fi
# Ensure we're in WORK_DIR and it exists
if [[ ! -d "$WORK_DIR" ]]; then
echo "ERROR: WORK_DIR does not exist: $WORK_DIR"
exit 1
fi
cd "$WORK_DIR" || {
echo "ERROR: Cannot cd to WORK_DIR: $WORK_DIR"
exit 1
}
find . -maxdepth 1 -type f \( -name "*.dsc" -o -name "*.spec" \) -exec grep -l "^<<<<<<< " {} \; 2>/dev/null | while read -r conflicted_file; do find . -maxdepth 1 -type f \( -name "*.dsc" -o -name "*.spec" \) -exec grep -l "^<<<<<<< " {} \; 2>/dev/null | while read -r conflicted_file; do
echo " Removing conflicted text file: $conflicted_file" echo " Removing conflicted text file: $conflicted_file"
rm -f "$conflicted_file" rm -f "$conflicted_file"
done done
# Ensure we're STILL in WORK_DIR before running osc commands
cd "$WORK_DIR" || {
echo "ERROR: Cannot cd to WORK_DIR: $WORK_DIR"
exit 1
}
echo "DEBUG: Current directory: $(pwd)"
echo "DEBUG: WORK_DIR=$WORK_DIR"
echo "DEBUG: Files in directory:"
ls -la 2>&1 | head -20
echo "==> Staging changes" echo "==> Staging changes"
echo "Files to upload:" echo "Files to upload:"
if [[ "$UPLOAD_DEBIAN" == true ]] && [[ "$UPLOAD_OPENSUSE" == true ]]; then if [[ "$UPLOAD_DEBIAN" == true ]] && [[ "$UPLOAD_OPENSUSE" == true ]]; then
ls -lh *.tar.gz *.tar.xz *.tar *.spec *.dsc _service 2>/dev/null | awk '{print " " $9 " (" $5 ")"}' ls -lh ./*.tar.gz ./*.tar.xz ./*.tar ./*.spec ./*.dsc _service 2>/dev/null | awk '{print " " $9 " (" $5 ")"}'
elif [[ "$UPLOAD_DEBIAN" == true ]]; then elif [[ "$UPLOAD_DEBIAN" == true ]]; then
ls -lh *.tar.gz *.dsc _service 2>/dev/null | awk '{print " " $9 " (" $5 ")"}' ls -lh ./*.tar.gz ./*.dsc _service 2>/dev/null | awk '{print " " $9 " (" $5 ")"}'
elif [[ "$UPLOAD_OPENSUSE" == true ]]; then elif [[ "$UPLOAD_OPENSUSE" == true ]]; then
ls -lh *.tar.gz *.tar.xz *.tar *.spec _service 2>/dev/null | awk '{print " " $9 " (" $5 ")"}' ls -lh ./*.tar.gz ./*.tar.xz ./*.tar ./*.spec _service 2>/dev/null | awk '{print " " $9 " (" $5 ")"}'
fi fi
echo "" echo ""
+164 -134
View File
@@ -1,4 +1,4 @@
#!/bin/bash #!/usr/bin/env bash
# Generic source package builder for DMS PPA packages # Generic source package builder for DMS PPA packages
# Usage: ./create-source.sh <package-dir> [ubuntu-series] # Usage: ./create-source.sh <package-dir> [ubuntu-series]
# #
@@ -50,9 +50,15 @@ fi
# Get absolute path # Get absolute path
PACKAGE_DIR=$(cd "$PACKAGE_DIR" && pwd) PACKAGE_DIR=$(cd "$PACKAGE_DIR" && pwd)
PACKAGE_NAME=$(basename "$PACKAGE_DIR") PACKAGE_NAME=$(basename "$PACKAGE_DIR")
PACKAGE_PARENT=$(dirname "$PACKAGE_DIR")
# Create temporary working directory (like OBS)
TEMP_WORK_DIR=$(mktemp -d -t ppa_build_work_XXXXXX)
trap 'rm -rf "$TEMP_WORK_DIR"' EXIT
info "Building source package for: $PACKAGE_NAME" info "Building source package for: $PACKAGE_NAME"
info "Package directory: $PACKAGE_DIR" info "Package directory: $PACKAGE_DIR"
info "Working directory: $TEMP_WORK_DIR"
info "Target Ubuntu series: $UBUNTU_SERIES" info "Target Ubuntu series: $UBUNTU_SERIES"
# Check for required files # Check for required files
@@ -73,7 +79,7 @@ done
# Verify GPG key is set up # Verify GPG key is set up
info "Checking GPG key setup..." info "Checking GPG key setup..."
if ! gpg --list-secret-keys &> /dev/null; then if ! gpg --list-secret-keys &>/dev/null; then
error "No GPG secret keys found. Please set up GPG first!" error "No GPG secret keys found. Please set up GPG first!"
error "See GPG_SETUP.md for instructions" error "See GPG_SETUP.md for instructions"
exit 1 exit 1
@@ -82,7 +88,7 @@ fi
success "GPG key found" success "GPG key found"
# Check if debuild is installed # Check if debuild is installed
if ! command -v debuild &> /dev/null; then if ! command -v debuild &>/dev/null; then
error "debuild not found. Install devscripts:" error "debuild not found. Install devscripts:"
error " sudo dnf install devscripts" error " sudo dnf install devscripts"
exit 1 exit 1
@@ -119,14 +125,19 @@ elif [[ -z "${GITHUB_ACTIONS:-}" ]] && [[ -z "${CI:-}" ]]; then
echo "==> Local/manual run detected (not in CI)" echo "==> Local/manual run detected (not in CI)"
fi fi
# Copy package to temp working directory
info "Copying package to working directory..."
cp -r "$PACKAGE_DIR" "$TEMP_WORK_DIR/"
WORK_PACKAGE_DIR="$TEMP_WORK_DIR/$PACKAGE_NAME"
# Detect package type and update version automatically # Detect package type and update version automatically
cd "$PACKAGE_DIR" cd "$WORK_PACKAGE_DIR"
# Function to get latest tag from GitHub # Function to get latest tag from GitHub
get_latest_tag() { get_latest_tag() {
local repo="$1" local repo="$1"
# Try GitHub API first (faster) # Try GitHub API first (faster)
if command -v curl &> /dev/null; then if command -v curl &>/dev/null; then
LATEST_TAG=$(curl -s "https://api.github.com/repos/$repo/releases/latest" 2>/dev/null | grep '"tag_name":' | sed 's/.*"tag_name": "\(.*\)".*/\1/' | head -1) LATEST_TAG=$(curl -s "https://api.github.com/repos/$repo/releases/latest" 2>/dev/null | grep '"tag_name":' | sed 's/.*"tag_name": "\(.*\)".*/\1/' | head -1)
if [ -n "$LATEST_TAG" ]; then if [ -n "$LATEST_TAG" ]; then
echo "$LATEST_TAG" | sed 's/^v//' echo "$LATEST_TAG" | sed 's/^v//'
@@ -164,63 +175,63 @@ fi
# Special handling for known packages # Special handling for known packages
case "$PACKAGE_NAME" in case "$PACKAGE_NAME" in
dms-git) dms-git)
IS_GIT_PACKAGE=true IS_GIT_PACKAGE=true
GIT_REPO="AvengeMedia/DankMaterialShell" GIT_REPO="AvengeMedia/DankMaterialShell"
SOURCE_DIR="dms-git-repo" SOURCE_DIR="dms-git-repo"
;; ;;
dms) dms)
GIT_REPO="AvengeMedia/DankMaterialShell" GIT_REPO="AvengeMedia/DankMaterialShell"
info "Downloading pre-built binaries and source for dms..." info "Downloading pre-built binaries and source for dms..."
# Get version from changelog (remove ppa suffix for both quilt and native formats) # Get version from changelog (remove ppa suffix for both quilt and native formats)
# Native: 0.5.2ppa1 -> 0.5.2, Quilt: 0.5.2-1ppa1 -> 0.5.2 # Native: 0.5.2ppa1 -> 0.5.2, Quilt: 0.5.2-1ppa1 -> 0.5.2
VERSION=$(dpkg-parsechangelog -S Version | sed 's/-[^-]*$//' | sed 's/ppa[0-9]*$//') VERSION=$(dpkg-parsechangelog -S Version | sed 's/-[^-]*$//' | sed 's/ppa[0-9]*$//')
# Download amd64 binary (will be included in source package) # Download amd64 binary (will be included in source package)
if [ ! -f "dms-distropkg-amd64.gz" ]; then if [ ! -f "dms-distropkg-amd64.gz" ]; then
info "Downloading dms binary for amd64..." info "Downloading dms binary for amd64..."
if wget -O dms-distropkg-amd64.gz "https://github.com/AvengeMedia/DankMaterialShell/releases/download/v${VERSION}/dms-distropkg-amd64.gz"; then if wget -O dms-distropkg-amd64.gz "https://github.com/AvengeMedia/DankMaterialShell/releases/download/v${VERSION}/dms-distropkg-amd64.gz"; then
success "amd64 binary downloaded" success "amd64 binary downloaded"
else else
error "Failed to download dms-distropkg-amd64.gz" error "Failed to download dms-distropkg-amd64.gz"
exit 1 exit 1
fi
fi fi
fi
# Download source tarball for QML files # Download source tarball for QML files
if [ ! -f "dms-source.tar.gz" ]; then if [ ! -f "dms-source.tar.gz" ]; then
info "Downloading dms source for QML files..." info "Downloading dms source for QML files..."
if wget -O dms-source.tar.gz "https://github.com/AvengeMedia/DankMaterialShell/archive/refs/tags/v${VERSION}.tar.gz"; then if wget -O dms-source.tar.gz "https://github.com/AvengeMedia/DankMaterialShell/archive/refs/tags/v${VERSION}.tar.gz"; then
success "source tarball downloaded" success "source tarball downloaded"
else else
error "Failed to download dms-source.tar.gz" error "Failed to download dms-source.tar.gz"
exit 1 exit 1
fi
fi fi
;; fi
dms-greeter) ;;
GIT_REPO="AvengeMedia/DankMaterialShell" dms-greeter)
info "Downloading source for dms-greeter..." GIT_REPO="AvengeMedia/DankMaterialShell"
VERSION=$(dpkg-parsechangelog -S Version | sed 's/-[^-]*$//' | sed 's/ppa[0-9]*$//') info "Downloading source for dms-greeter..."
VERSION=$(dpkg-parsechangelog -S Version | sed 's/-[^-]*$//' | sed 's/ppa[0-9]*$//')
if [ ! -f "dms-greeter-source.tar.gz" ]; then if [ ! -f "dms-greeter-source.tar.gz" ]; then
info "Downloading dms-greeter source..." info "Downloading dms-greeter source..."
if wget -O dms-greeter-source.tar.gz "https://github.com/AvengeMedia/DankMaterialShell/archive/refs/tags/v${VERSION}.tar.gz"; then if wget -O dms-greeter-source.tar.gz "https://github.com/AvengeMedia/DankMaterialShell/archive/refs/tags/v${VERSION}.tar.gz"; then
success "source tarball downloaded" success "source tarball downloaded"
else else
error "Failed to download dms-greeter-source.tar.gz" error "Failed to download dms-greeter-source.tar.gz"
exit 1 exit 1
fi
fi fi
;; fi
danksearch) ;;
# danksearch uses pre-built binary from releases danksearch)
GIT_REPO="AvengeMedia/danksearch" # danksearch uses pre-built binary from releases
;; GIT_REPO="AvengeMedia/danksearch"
dgop) ;;
# dgop uses pre-built binary from releases dgop)
GIT_REPO="AvengeMedia/dgop" # dgop uses pre-built binary from releases
;; GIT_REPO="AvengeMedia/dgop"
;;
esac esac
# Handle git packages # Handle git packages
@@ -283,22 +294,29 @@ if [ "$IS_GIT_PACKAGE" = true ] && [ -n "$GIT_REPO" ]; then
# Check if we're rebuilding the same commit (increment PPA number if so) # Check if we're rebuilding the same commit (increment PPA number if so)
BASE_VERSION="${UPSTREAM_VERSION}+git${GIT_COMMIT_COUNT}.${GIT_COMMIT_HASH}" BASE_VERSION="${UPSTREAM_VERSION}+git${GIT_COMMIT_COUNT}.${GIT_COMMIT_HASH}"
CURRENT_VERSION=$(dpkg-parsechangelog -S Version 2>/dev/null || echo "") CURRENT_VERSION=$(dpkg-parsechangelog -S Version 2>/dev/null || echo "")
PPA_NUM=1
# If current version matches the base version, increment PPA number # Use REBUILD_RELEASE if provided, otherwise auto-increment
# Escape special regex characters in BASE_VERSION for pattern matching if [[ -n "${REBUILD_RELEASE:-}" ]]; then
ESCAPED_BASE=$(echo "$BASE_VERSION" | sed 's/\./\\./g' | sed 's/+/\\+/g') PPA_NUM=$REBUILD_RELEASE
if [[ "$CURRENT_VERSION" =~ ^${ESCAPED_BASE}ppa([0-9]+)$ ]]; then info "Using REBUILD_RELEASE=$REBUILD_RELEASE for PPA number"
PPA_NUM=$((BASH_REMATCH[1] + 1))
if [[ "$IS_MANUAL" == true ]]; then
info "Detected rebuild of same commit (current: $CURRENT_VERSION), incrementing PPA number to $PPA_NUM"
else
info "Detected rebuild of same commit (current: $CURRENT_VERSION). Not a manual run, skipping."
success "No changes needed (commit matches)."
exit 0
fi
else else
info "New commit or first build, using PPA number $PPA_NUM" PPA_NUM=1
# If current version matches the base version, increment PPA number
# Escape special regex characters in BASE_VERSION for pattern matching
ESCAPED_BASE=$(echo "$BASE_VERSION" | sed 's/\./\\./g' | sed 's/+/\\+/g')
if [[ "$CURRENT_VERSION" =~ ^${ESCAPED_BASE}ppa([0-9]+)$ ]]; then
PPA_NUM=$((BASH_REMATCH[1] + 1))
if [[ "$IS_MANUAL" == true ]]; then
info "Detected rebuild of same commit (current: $CURRENT_VERSION), incrementing PPA number to $PPA_NUM"
else
info "Detected rebuild of same commit (current: $CURRENT_VERSION). Not a manual run, skipping."
success "No changes needed (commit matches)."
exit 0
fi
else
info "New commit or first build, using PPA number $PPA_NUM"
fi
fi fi
NEW_VERSION="${BASE_VERSION}ppa${PPA_NUM}" NEW_VERSION="${BASE_VERSION}ppa${PPA_NUM}"
@@ -309,7 +327,7 @@ if [ "$IS_GIT_PACKAGE" = true ] && [ -n "$GIT_REPO" ]; then
OLD_ENTRY_START=$(grep -n "^${SOURCE_NAME} (" debian/changelog | sed -n '2p' | cut -d: -f1) OLD_ENTRY_START=$(grep -n "^${SOURCE_NAME} (" debian/changelog | sed -n '2p' | cut -d: -f1)
if [ -n "$OLD_ENTRY_START" ]; then if [ -n "$OLD_ENTRY_START" ]; then
# Found second entry, use everything from there # Found second entry, use everything from there
CHANGELOG_CONTENT=$(tail -n +$OLD_ENTRY_START debian/changelog) CHANGELOG_CONTENT=$(tail -n +"$OLD_ENTRY_START" debian/changelog)
else else
# No second entry found, changelog will only have new entry # No second entry found, changelog will only have new entry
CHANGELOG_CONTENT="" CHANGELOG_CONTENT=""
@@ -323,10 +341,10 @@ if [ "$IS_GIT_PACKAGE" = true ] && [ -n "$GIT_REPO" ]; then
-- Avenge Media <AvengeMedia.US@gmail.com> $(date -R)" -- Avenge Media <AvengeMedia.US@gmail.com> $(date -R)"
# Write new changelog (new entry, blank line, then old entries) # Write new changelog (new entry, blank line, then old entries)
echo "$CHANGELOG_ENTRY" > debian/changelog echo "$CHANGELOG_ENTRY" >debian/changelog
if [ -n "$CHANGELOG_CONTENT" ]; then if [ -n "$CHANGELOG_CONTENT" ]; then
echo "" >> debian/changelog echo "" >>debian/changelog
echo "$CHANGELOG_CONTENT" >> debian/changelog echo "$CHANGELOG_CONTENT" >>debian/changelog
fi fi
success "Version updated to $NEW_VERSION" success "Version updated to $NEW_VERSION"
@@ -337,8 +355,8 @@ if [ "$IS_GIT_PACKAGE" = true ] && [ -n "$GIT_REPO" ]; then
# Save version info for dms-git build process # Save version info for dms-git build process
if [ "$PACKAGE_NAME" = "dms-git" ]; then if [ "$PACKAGE_NAME" = "dms-git" ]; then
info "Saving version info to .dms-version for build process..." info "Saving version info to .dms-version for build process..."
echo "VERSION=${UPSTREAM_VERSION}+git${GIT_COMMIT_COUNT}.${GIT_COMMIT_HASH}" > "$SOURCE_DIR/.dms-version" echo "VERSION=${UPSTREAM_VERSION}+git${GIT_COMMIT_COUNT}.${GIT_COMMIT_HASH}" >"$SOURCE_DIR/.dms-version"
echo "COMMIT=${GIT_COMMIT_HASH}" >> "$SOURCE_DIR/.dms-version" echo "COMMIT=${GIT_COMMIT_HASH}" >>"$SOURCE_DIR/.dms-version"
success "Version info saved: ${UPSTREAM_VERSION}+git${GIT_COMMIT_COUNT}.${GIT_COMMIT_HASH}" success "Version info saved: ${UPSTREAM_VERSION}+git${GIT_COMMIT_COUNT}.${GIT_COMMIT_HASH}"
# Vendor Go dependencies (Launchpad has no internet access) # Vendor Go dependencies (Launchpad has no internet access)
@@ -379,7 +397,7 @@ if [ "$IS_GIT_PACKAGE" = true ] && [ -n "$GIT_REPO" ]; then
/^\[source\.crates-io\]/ { printing=1 } /^\[source\.crates-io\]/ { printing=1 }
printing { print } printing { print }
/^directory = "vendor"$/ { exit } /^directory = "vendor"$/ { exit }
' > .cargo/config.toml ' >.cargo/config.toml
# Verify vendor directory was created # Verify vendor directory was created
if [ ! -d "vendor" ]; then if [ ! -d "vendor" ]; then
@@ -410,7 +428,6 @@ if [ "$IS_GIT_PACKAGE" = true ] && [ -n "$GIT_REPO" ]; then
fi fi
fi fi
success "Source prepared for packaging" success "Source prepared for packaging"
else else
error "Failed to clone $GIT_REPO" error "Failed to clone $GIT_REPO"
@@ -425,20 +442,27 @@ elif [ -n "$GIT_REPO" ]; then
LATEST_TAG=$(get_latest_tag "$GIT_REPO") LATEST_TAG=$(get_latest_tag "$GIT_REPO")
if [ -n "$LATEST_TAG" ]; then if [ -n "$LATEST_TAG" ]; then
# Check source format - native packages can't use dashes # Check source format - native packages can't use dashes
SOURCE_FORMAT=$(cat debian/source/format 2>/dev/null | head -1 || echo "3.0 (quilt)") SOURCE_FORMAT=$(head -1 debian/source/format 2>/dev/null || echo "3.0 (quilt)")
# Get current version to check if we need to increment PPA number # Get current version to check if we need to increment PPA number
CURRENT_VERSION=$(dpkg-parsechangelog -S Version 2>/dev/null || echo "") CURRENT_VERSION=$(dpkg-parsechangelog -S Version 2>/dev/null || echo "")
PPA_NUM=1
# Use REBUILD_RELEASE if provided, otherwise auto-increment
if [[ -n "${REBUILD_RELEASE:-}" ]]; then
PPA_NUM=$REBUILD_RELEASE
info "Using REBUILD_RELEASE=$REBUILD_RELEASE for PPA number"
else
PPA_NUM=1
fi
if [[ "$SOURCE_FORMAT" == *"native"* ]]; then if [[ "$SOURCE_FORMAT" == *"native"* ]]; then
# Native format: 0.2.1ppa1 (no dash, no revision) # Native format: 0.2.1ppa1 (no dash, no revision)
BASE_VERSION="${LATEST_TAG}" BASE_VERSION="${LATEST_TAG}"
# Check if we're rebuilding the same version (increment PPA number if so) # Check if we're rebuilding the same version (increment PPA number if so)
if [[ "$CURRENT_VERSION" =~ ^${LATEST_TAG}ppa([0-9]+)$ ]]; then if [[ -z "${REBUILD_RELEASE:-}" ]] && [[ "$CURRENT_VERSION" =~ ^${LATEST_TAG}ppa([0-9]+)$ ]]; then
PPA_NUM=$((BASH_REMATCH[1] + 1)) PPA_NUM=$((BASH_REMATCH[1] + 1))
info "Detected rebuild of same version (current: $CURRENT_VERSION), incrementing PPA number to $PPA_NUM" info "Detected rebuild of same version (current: $CURRENT_VERSION), incrementing PPA number to $PPA_NUM"
else elif [[ -z "${REBUILD_RELEASE:-}" ]]; then
info "New version or first build, using PPA number $PPA_NUM" info "New version or first build, using PPA number $PPA_NUM"
fi fi
NEW_VERSION="${BASE_VERSION}ppa${PPA_NUM}" NEW_VERSION="${BASE_VERSION}ppa${PPA_NUM}"
@@ -447,7 +471,7 @@ elif [ -n "$GIT_REPO" ]; then
BASE_VERSION="${LATEST_TAG}-1" BASE_VERSION="${LATEST_TAG}-1"
# Check if we're rebuilding the same version (increment PPA number if so) # Check if we're rebuilding the same version (increment PPA number if so)
ESCAPED_BASE=$(echo "$BASE_VERSION" | sed 's/\./\\./g' | sed 's/-/\\-/g') ESCAPED_BASE=$(echo "$BASE_VERSION" | sed 's/\./\\./g' | sed 's/-/\\-/g')
if [[ "$CURRENT_VERSION" =~ ^${ESCAPED_BASE}ppa([0-9]+)$ ]]; then if [[ -z "${REBUILD_RELEASE:-}" ]] && [[ "$CURRENT_VERSION" =~ ^${ESCAPED_BASE}ppa([0-9]+)$ ]]; then
PPA_NUM=$((BASH_REMATCH[1] + 1)) PPA_NUM=$((BASH_REMATCH[1] + 1))
if [[ "$IS_MANUAL" == true ]]; then if [[ "$IS_MANUAL" == true ]]; then
info "Detected rebuild of same version (current: $CURRENT_VERSION), incrementing PPA number to $PPA_NUM" info "Detected rebuild of same version (current: $CURRENT_VERSION), incrementing PPA number to $PPA_NUM"
@@ -456,7 +480,7 @@ elif [ -n "$GIT_REPO" ]; then
success "No changes needed (version matches)." success "No changes needed (version matches)."
exit 0 exit 0
fi fi
else elif [[ -z "${REBUILD_RELEASE:-}" ]]; then
info "New version or first build, using PPA number $PPA_NUM" info "New version or first build, using PPA number $PPA_NUM"
fi fi
NEW_VERSION="${BASE_VERSION}ppa${PPA_NUM}" NEW_VERSION="${BASE_VERSION}ppa${PPA_NUM}"
@@ -473,7 +497,7 @@ elif [ -n "$GIT_REPO" ]; then
# Get current changelog content - find the next package header line # Get current changelog content - find the next package header line
OLD_ENTRY_START=$(grep -n "^${SOURCE_NAME} (" debian/changelog | sed -n '2p' | cut -d: -f1) OLD_ENTRY_START=$(grep -n "^${SOURCE_NAME} (" debian/changelog | sed -n '2p' | cut -d: -f1)
if [ -n "$OLD_ENTRY_START" ]; then if [ -n "$OLD_ENTRY_START" ]; then
CHANGELOG_CONTENT=$(tail -n +$OLD_ENTRY_START debian/changelog) CHANGELOG_CONTENT=$(tail -n +"$OLD_ENTRY_START" debian/changelog)
else else
CHANGELOG_CONTENT="" CHANGELOG_CONTENT=""
fi fi
@@ -490,10 +514,10 @@ elif [ -n "$GIT_REPO" ]; then
* ${CHANGELOG_MSG} * ${CHANGELOG_MSG}
-- Avenge Media <AvengeMedia.US@gmail.com> $(date -R)" -- Avenge Media <AvengeMedia.US@gmail.com> $(date -R)"
echo "$CHANGELOG_ENTRY" > debian/changelog echo "$CHANGELOG_ENTRY" >debian/changelog
if [ -n "$CHANGELOG_CONTENT" ]; then if [ -n "$CHANGELOG_CONTENT" ]; then
echo "" >> debian/changelog echo "" >>debian/changelog
echo "$CHANGELOG_CONTENT" >> debian/changelog echo "$CHANGELOG_CONTENT" >>debian/changelog
fi fi
success "Version updated to $NEW_VERSION" success "Version updated to $NEW_VERSION"
else else
@@ -507,47 +531,47 @@ fi
# Handle packages that need pre-built binaries downloaded # Handle packages that need pre-built binaries downloaded
cd "$PACKAGE_DIR" cd "$PACKAGE_DIR"
case "$PACKAGE_NAME" in case "$PACKAGE_NAME" in
danksearch) danksearch)
info "Downloading pre-built binaries for danksearch..." info "Downloading pre-built binaries for danksearch..."
# Get version from changelog (remove ppa suffix for both quilt and native formats) # Get version from changelog (remove ppa suffix for both quilt and native formats)
# Native: 0.5.2ppa1 -> 0.5.2, Quilt: 0.5.2-1ppa1 -> 0.5.2 # Native: 0.5.2ppa1 -> 0.5.2, Quilt: 0.5.2-1ppa1 -> 0.5.2
VERSION=$(dpkg-parsechangelog -S Version | sed 's/-[^-]*$//' | sed 's/ppa[0-9]*$//') VERSION=$(dpkg-parsechangelog -S Version | sed 's/-[^-]*$//' | sed 's/ppa[0-9]*$//')
# Download both amd64 and arm64 binaries (will be included in source package) # Download both amd64 and arm64 binaries (will be included in source package)
# Launchpad can't download during build, so we include both architectures # Launchpad can't download during build, so we include both architectures
if [ ! -f "dsearch-amd64" ]; then if [ ! -f "dsearch-amd64" ]; then
info "Downloading dsearch binary for amd64..." info "Downloading dsearch binary for amd64..."
if wget -O dsearch-amd64.gz "https://github.com/AvengeMedia/danksearch/releases/download/v${VERSION}/dsearch-linux-amd64.gz"; then if wget -O dsearch-amd64.gz "https://github.com/AvengeMedia/danksearch/releases/download/v${VERSION}/dsearch-linux-amd64.gz"; then
gunzip dsearch-amd64.gz gunzip dsearch-amd64.gz
chmod +x dsearch-amd64 chmod +x dsearch-amd64
success "amd64 binary downloaded" success "amd64 binary downloaded"
else else
error "Failed to download dsearch-amd64.gz" error "Failed to download dsearch-amd64.gz"
exit 1 exit 1
fi
fi fi
fi
if [ ! -f "dsearch-arm64" ]; then if [ ! -f "dsearch-arm64" ]; then
info "Downloading dsearch binary for arm64..." info "Downloading dsearch binary for arm64..."
if wget -O dsearch-arm64.gz "https://github.com/AvengeMedia/danksearch/releases/download/v${VERSION}/dsearch-linux-arm64.gz"; then if wget -O dsearch-arm64.gz "https://github.com/AvengeMedia/danksearch/releases/download/v${VERSION}/dsearch-linux-arm64.gz"; then
gunzip dsearch-arm64.gz gunzip dsearch-arm64.gz
chmod +x dsearch-arm64 chmod +x dsearch-arm64
success "arm64 binary downloaded" success "arm64 binary downloaded"
else else
error "Failed to download dsearch-arm64.gz" error "Failed to download dsearch-arm64.gz"
exit 1 exit 1
fi
fi fi
;; fi
dgop) ;;
# dgop binary should already be committed in the repo dgop)
if [ ! -f "dgop" ]; then # dgop binary should already be committed in the repo
warn "dgop binary not found - should be committed to repo" if [ ! -f "dgop" ]; then
fi warn "dgop binary not found - should be committed to repo"
;; fi
;;
esac esac
cd - > /dev/null cd - >/dev/null
# Check if this version already exists on PPA (only in CI environment) # Check if this version already exists on PPA (only in CI environment)
if command -v rmadison >/dev/null 2>&1; then if command -v rmadison >/dev/null 2>&1; then
@@ -561,10 +585,10 @@ if command -v rmadison >/dev/null 2>&1; then
cd "$PACKAGE_DIR" cd "$PACKAGE_DIR"
# Still clean up extracted sources # Still clean up extracted sources
case "$PACKAGE_NAME" in case "$PACKAGE_NAME" in
dms-git) dms-git)
rm -rf DankMaterialShell-* rm -rf DankMaterialShell-*
success "Cleaned up DankMaterialShell-*/ directory" success "Cleaned up DankMaterialShell-*/ directory"
;; ;;
esac esac
exit 0 exit 0
fi fi
@@ -575,10 +599,12 @@ info "Building source package..."
echo echo
# Determine if we need to include orig tarball (-sa) or just debian changes (-sd) # Determine if we need to include orig tarball (-sa) or just debian changes (-sd)
# Check if .orig.tar.xz already exists in parent directory (previous build) # Check if .orig.tar.xz already exists in real parent directory (previous build)
ORIG_TARBALL="${PACKAGE_NAME}_${VERSION%.ppa*}.orig.tar.xz" ORIG_TARBALL="${PACKAGE_NAME}_${VERSION%.ppa*}.orig.tar.xz"
if [ -f "../$ORIG_TARBALL" ]; then if [ -f "$PACKAGE_PARENT/$ORIG_TARBALL" ]; then
info "Found existing orig tarball, using -sd (debian changes only)" info "Found existing orig tarball in $PACKAGE_PARENT, using -sd (debian changes only)"
# Copy it to temp parent so debuild can find it
cp "$PACKAGE_PARENT/$ORIG_TARBALL" "$TEMP_WORK_DIR/"
DEBUILD_SOURCE_FLAG="-sd" DEBUILD_SOURCE_FLAG="-sd"
else else
info "No existing orig tarball found, using -sa (include original source)" info "No existing orig tarball found, using -sa (include original source)"
@@ -592,15 +618,19 @@ if yes | DEBIAN_FRONTEND=noninteractive debuild -S $DEBUILD_SOURCE_FLAG -d; then
echo echo
success "Source package built successfully!" success "Source package built successfully!"
# Copy build artifacts back to parent directory
info "Copying build artifacts to $PACKAGE_PARENT..."
cp -v "$TEMP_WORK_DIR"/"${SOURCE_NAME}"_"${CHANGELOG_VERSION}"* "$PACKAGE_PARENT/" 2>/dev/null || true
# List generated files # List generated files
info "Generated files in $(dirname "$PACKAGE_DIR"):" info "Generated files in $PACKAGE_PARENT:"
ls -lh "$(dirname "$PACKAGE_DIR")"/${SOURCE_NAME}_${CHANGELOG_VERSION}* 2>/dev/null || true ls -lh "$PACKAGE_PARENT"/"${SOURCE_NAME}"_"${CHANGELOG_VERSION}"* 2>/dev/null || true
# Show what to do next # Show what to do next
echo echo
info "Next steps:" info "Next steps:"
echo " 1. Review the source package:" echo " 1. Review the source package:"
echo " cd $(dirname "$PACKAGE_DIR")" echo " cd $PACKAGE_PARENT"
echo " ls -lh ${SOURCE_NAME}_${CHANGELOG_VERSION}*" echo " ls -lh ${SOURCE_NAME}_${CHANGELOG_VERSION}*"
echo echo
echo " 2. Upload to PPA (stable):" echo " 2. Upload to PPA (stable):"
@@ -610,7 +640,7 @@ if yes | DEBIAN_FRONTEND=noninteractive debuild -S $DEBUILD_SOURCE_FLAG -d; then
echo " dput ppa:avengemedia/dms-git ${SOURCE_NAME}_${CHANGELOG_VERSION}_source.changes" echo " dput ppa:avengemedia/dms-git ${SOURCE_NAME}_${CHANGELOG_VERSION}_source.changes"
echo echo
echo " 4. Or use the upload script:" echo " 4. Or use the upload script:"
echo " ./upload-ppa.sh $(dirname "$PACKAGE_DIR")/${SOURCE_NAME}_${CHANGELOG_VERSION}_source.changes dms" echo " ./upload-ppa.sh $PACKAGE_PARENT/${SOURCE_NAME}_${CHANGELOG_VERSION}_source.changes dms"
else else
error "Source package build failed!" error "Source package build failed!"
+38 -13
View File
@@ -58,23 +58,18 @@ CHANGES_FILE=$(realpath "$CHANGES_FILE")
info "Uploading to PPA: ppa:avengemedia/$PPA_NAME" info "Uploading to PPA: ppa:avengemedia/$PPA_NAME"
info "Changes file: $CHANGES_FILE" info "Changes file: $CHANGES_FILE"
# Check if dput or lftp is installed # Check if dput is installed
UPLOAD_METHOD="" if command -v dput &>/dev/null; then
if command -v dput &> /dev/null; then info "dput found"
UPLOAD_METHOD="dput"
elif command -v lftp &> /dev/null; then
UPLOAD_METHOD="lftp"
warn "dput not found, using lftp as fallback"
else else
error "Neither dput nor lftp found. Install one with:" error "dput not found. Install with:"
error " sudo dnf install dput-ng # Preferred but broken on Fedora" error " sudo dnf install dput-ng"
error " sudo dnf install lftp # Alternative upload method"
exit 1 exit 1
fi fi
# Check if ~/.dput.cf exists # Check if ~/.dput.cf exists
if [ ! -f "$HOME/.dput.cf" ]; then if [ ! -f "$HOME/.dput.cf" ]; then
error "~/.dput.cf not found!" error "$HOME/.dput.cf not found!"
echo echo
info "Create it from template:" info "Create it from template:"
echo " cp $(dirname "$0")/../dput.cf.template ~/.dput.cf" echo " cp $(dirname "$0")/../dput.cf.template ~/.dput.cf"
@@ -150,7 +145,38 @@ fi
info "Uploading to Launchpad..." info "Uploading to Launchpad..."
echo echo
if dput "ppa:avengemedia/$PPA_NAME" "$CHANGES_FILE"; then UPLOAD_SUCCESS=false
if [ "$UPLOAD_METHOD" = "dput" ]; then
if dput "ppa:avengemedia/$PPA_NAME" "$CHANGES_FILE"; then
UPLOAD_SUCCESS=true
fi
elif [ "$UPLOAD_METHOD" = "lftp" ]; then
# Use lftp to upload to Launchpad PPA
CHANGES_DIR=$(dirname "$CHANGES_FILE")
CHANGES_BASENAME=$(basename "$CHANGES_FILE")
# Extract files to upload from .changes file
FILES_TO_UPLOAD=("$CHANGES_BASENAME")
while IFS= read -r line; do
if [[ "$line" =~ ^\ [a-f0-9]+\ [0-9]+\ [^\ ]+\ [^\ ]+\ (.+)$ ]]; then
FILES_TO_UPLOAD+=("${BASH_REMATCH[1]}")
fi
done < "$CHANGES_FILE"
# Build lftp command to upload all files
LFTP_COMMANDS="set ftp:ssl-allow no; open ftp://ppa.launchpad.net; user anonymous ''; cd ~avengemedia/ubuntu/$PPA_NAME/;"
for file in "${FILES_TO_UPLOAD[@]}"; do
LFTP_COMMANDS="$LFTP_COMMANDS put '$CHANGES_DIR/$file';"
done
LFTP_COMMANDS="$LFTP_COMMANDS bye"
if echo "$LFTP_COMMANDS" | lftp; then
UPLOAD_SUCCESS=true
fi
fi
if [ "$UPLOAD_SUCCESS" = true ]; then
echo echo
success "Upload successful!" success "Upload successful!"
echo echo
@@ -166,7 +192,6 @@ if dput "ppa:avengemedia/$PPA_NAME" "$CHANGES_FILE"; then
echo " sudo add-apt-repository ppa:avengemedia/$PPA_NAME" echo " sudo add-apt-repository ppa:avengemedia/$PPA_NAME"
echo " sudo apt update" echo " sudo apt update"
echo " sudo apt install $PACKAGE_NAME" echo " sudo apt install $PACKAGE_NAME"
else else
error "Upload failed!" error "Upload failed!"
echo echo
+37 -42
View File
@@ -99,10 +99,6 @@ info "Step 2: Uploading to PPA..."
if [ "$PPA_NAME" = "danklinux" ] || [ "$PPA_NAME" = "dms" ] || [ "$PPA_NAME" = "dms-git" ]; then if [ "$PPA_NAME" = "danklinux" ] || [ "$PPA_NAME" = "dms" ] || [ "$PPA_NAME" = "dms-git" ]; then
warn "Using lftp for upload" warn "Using lftp for upload"
# Extract version from changes file
VERSION=$(grep "^Version:" "$CHANGES_FILE" | awk '{print $2}')
SOURCE_NAME=$(grep "^Source:" "$CHANGES_FILE" | awk '{print $2}')
# Find all files to upload # Find all files to upload
BUILD_DIR=$(dirname "$CHANGES_FILE") BUILD_DIR=$(dirname "$CHANGES_FILE")
CHANGES_BASENAME=$(basename "$CHANGES_FILE") CHANGES_BASENAME=$(basename "$CHANGES_FILE")
@@ -133,7 +129,7 @@ if [ "$PPA_NAME" = "danklinux" ] || [ "$PPA_NAME" = "dms" ] || [ "$PPA_NAME" = "
# lftp build dir change # lftp build dir change
LFTP_SCRIPT=$(mktemp) LFTP_SCRIPT=$(mktemp)
cat > "$LFTP_SCRIPT" <<EOF cat >"$LFTP_SCRIPT" <<EOF
cd ~avengemedia/ubuntu/$PPA_NAME/ cd ~avengemedia/ubuntu/$PPA_NAME/
lcd $BUILD_DIR lcd $BUILD_DIR
mput $CHANGES_BASENAME mput $CHANGES_BASENAME
@@ -143,7 +139,7 @@ mput $BUILDINFO
bye bye
EOF EOF
if lftp -d ftp://anonymous:@ppa.launchpad.net < "$LFTP_SCRIPT"; then if lftp -d ftp://anonymous:@ppa.launchpad.net <"$LFTP_SCRIPT"; then
success "Upload successful!" success "Upload successful!"
rm -f "$LFTP_SCRIPT" rm -f "$LFTP_SCRIPT"
else else
@@ -197,41 +193,41 @@ if [ "$KEEP_BUILDS" = "false" ]; then
# Clean up downloaded binaries in package directory # Clean up downloaded binaries in package directory
case "$PACKAGE_NAME" in case "$PACKAGE_NAME" in
danksearch) danksearch)
if [ -f "$PACKAGE_DIR/dsearch-amd64" ]; then if [ -f "$PACKAGE_DIR/dsearch-amd64" ]; then
rm -f "$PACKAGE_DIR/dsearch-amd64" rm -f "$PACKAGE_DIR/dsearch-amd64"
REMOVED=$((REMOVED + 1)) REMOVED=$((REMOVED + 1))
fi fi
if [ -f "$PACKAGE_DIR/dsearch-arm64" ]; then if [ -f "$PACKAGE_DIR/dsearch-arm64" ]; then
rm -f "$PACKAGE_DIR/dsearch-arm64" rm -f "$PACKAGE_DIR/dsearch-arm64"
REMOVED=$((REMOVED + 1)) REMOVED=$((REMOVED + 1))
fi fi
;; ;;
dms) dms)
# Remove downloaded binaries and source # Remove downloaded binaries and source
if [ -f "$PACKAGE_DIR/dms-distropkg-amd64.gz" ]; then if [ -f "$PACKAGE_DIR/dms-distropkg-amd64.gz" ]; then
rm -f "$PACKAGE_DIR/dms-distropkg-amd64.gz" rm -f "$PACKAGE_DIR/dms-distropkg-amd64.gz"
REMOVED=$((REMOVED + 1)) REMOVED=$((REMOVED + 1))
fi fi
if [ -f "$PACKAGE_DIR/dms-source.tar.gz" ]; then if [ -f "$PACKAGE_DIR/dms-source.tar.gz" ]; then
rm -f "$PACKAGE_DIR/dms-source.tar.gz" rm -f "$PACKAGE_DIR/dms-source.tar.gz"
REMOVED=$((REMOVED + 1)) REMOVED=$((REMOVED + 1))
fi fi
;; ;;
dms-git) dms-git)
# Remove git source directory binary # Remove git source directory binary
if [ -d "$PACKAGE_DIR/dms-git-repo" ]; then if [ -d "$PACKAGE_DIR/dms-git-repo" ]; then
rm -rf "$PACKAGE_DIR/dms-git-repo" rm -rf "$PACKAGE_DIR/dms-git-repo"
REMOVED=$((REMOVED + 1)) REMOVED=$((REMOVED + 1))
fi fi
;; ;;
dms-greeter) dms-greeter)
# Remove downloaded source # Remove downloaded source
if [ -f "$PACKAGE_DIR/dms-greeter-source.tar.gz" ]; then if [ -f "$PACKAGE_DIR/dms-greeter-source.tar.gz" ]; then
rm -f "$PACKAGE_DIR/dms-greeter-source.tar.gz" rm -f "$PACKAGE_DIR/dms-greeter-source.tar.gz"
REMOVED=$((REMOVED + 1)) REMOVED=$((REMOVED + 1))
fi fi
;; ;;
esac esac
if [ $REMOVED -gt 0 ]; then if [ $REMOVED -gt 0 ]; then
@@ -246,4 +242,3 @@ fi
echo echo
success "Done!" success "Done!"
+3 -3
View File
@@ -1,5 +1,5 @@
dms (0.6.2ppa3) questing; urgency=medium dms (1.0.0ppa4) questing; urgency=medium
* Rebuild for packaging fixes (ppa3) * Rebuild for packaging fixes (ppa4)
-- Avenge Media <AvengeMedia.US@gmail.com> Sun, 23 Nov 2025 00:40:41 -0500 -- Avenge Media <AvengeMedia.US@gmail.com> Wed, 10 Dec 2025 12:56:23 -0500

Some files were not shown because too many files have changed in this diff Show More