#!/bin/bash
# REQUIREMENT: we need fswatch on both ends; run the following to get it on Ubuntu 16.04
#sudo add-apt-repository ppa:hadret/fswatch
#sudo apt-get update
#sudo apt-get install -y fswatch
printHelp(){
echo "USAGE: duplexRsync --remoteHost user@host
duplexRsync requires fswatch on both ends; this script tries to install it locally using brew (required).
On the remote end run:
sudo add-apt-repository ppa:hadret/fswatch
sudo apt-get update
sudo apt-get install -y fswatch
You need to specify:
--remoteHost ex: [email protected]
You can also optionally specify:
--remoteParent the remote directory that contains/will contain the synced folder (must end with a trailing /)"
}
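# Example invocation (hypothetical host and parent directory, for illustration only):
#   ./duplexRsync.sh --remoteHost [email protected] --remoteParent projects/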
# if our arguments match this string, it's the socat fork trigger for remote change detection; increment sentinel and exit
if [ "$*" = "sentinelIncrement" ];
then
sentval=$(cat .____sentinel);sentval=$((sentval+1));echo $sentval > .____sentinel;
exit;
fi
if [ "$*" = "" ];
then
printHelp;
exit;
fi
# we need brew on macosx
if [ -z "$(command -v brew)" ];
then
printHelp;
exit
fi
# this is for macOS; we also need socat to create a socket so the remote end can trigger rsync
brew install socat fswatch gnu-getopt
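# helpers: pick a pseudo-random port in the 42xxx range (42 followed by a number from 0 to 998);
# the socat loop further down retries randomLocalPort until it can actually bind to the chosen port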
function randomLocalPort() {
localPort=$RANDOM;
let "localPort %= 999";
localPort="42$localPort"
}
function randomRemotePort() {
remotePort=$RANDOM;
let "remotePort %= 999";
remotePort="42$remotePort"
}
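# macOS ships a BSD getopt that does not support long options such as --remoteHost,
# so we call the GNU getopt installed by brew directly from its Cellar path
# (this hard-coded path assumes the /usr/local Homebrew prefix)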
if ! options=$(/usr/local/Cellar/gnu-getopt/*/bin/getopt -u -o hr:p: -l help,remoteHost:,remoteParent: -- "$@")
then
# something went wrong, getopt will put out an error message for us
exit 1
fi
set -- $options
while [ $# -gt 0 ]
do
case $1 in
# for options with required arguments, an additional shift is required
-h|--help ) printHelp; exit; shift;;
-r|--remoteHost ) remoteHost=$2; shift;;
-p|--remoteParent ) remoteParent=$2; shift;;
--) shift; break;;
#(-*) echo "$0: error - unrecognized option $1" 1>&2; exit 1;;
(*) break;;
esac
shift
done
if [ -z "$remoteHost" ];
then
echo "Missing Argument: --remoteHost"
printHelp;
exit;
fi
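# the remote directory mirrors the name of the current working directory,
# optionally prefixed by --remoteParent (which is why remoteParent must end with a /)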
remoteDir=${PWD##*/}
remoteDir="$remoteParent$remoteDir"
if [ ! -f ~/.ssh/id_rsa.pub ];
then
echo "You need a key pair to use duplexRsync. You can generate one using: ssh-keygen -t rsa"
exit;
fi
# we'll need to ssh without a password - use public key crypto to ssh into the remote end, rsync needs this
# we are copying our pubkey so we can ssh in without a prompt
cat ~/.ssh/id_rsa.pub | ssh "$remoteHost" 'mkdir -p .ssh;pubkey=$(cat); touch .ssh/authorized_keys; if grep -q "$pubkey" ".ssh/authorized_keys"; then echo "public key for this user already present"; else echo $pubkey >> .ssh/authorized_keys;fi'
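# (roughly equivalent to what ssh-copy-id does; you may be prompted for the password this one time)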
fswatchPath=$(ssh "$remoteHost" 'command -v fswatch')
# on a macOS remote the $PATH variable differs between a local shell and an ssh session, so let's try looking up the usual local path
if [ -z "$fswatchPath" ];
then
fswatchPath=$(ssh "$remoteHost" 'command -v /usr/local/bin/fswatch')
fi
if [ -z "$fswatchPath" ];
then
echo "ERROR: missing fswatch at remote end"
printHelp;
exit;
fi
# kill all remote fswatches for this path that might be lingering
ssh $remoteHost "pkill -P \$(ps alx | egrep '.*pipe_w.*____rsyncSignal.sh --pwd $PWD --port $remotePort' | awk '{print \$4}' | head -n 1)"
ssh $remoteHost "pkill -f '____rsyncSignal.sh --pwd $PWD'"
# if we have the ssh tunnel running this will match and we kill it; pwd args to prevent killing other folders being watched
pkill -f "rsyncSignal.sh --pwd $PWD"
# if we have a lingering socat kill it
# we shouldn't have one; this is a bad plan if using multiple sockets
#pkill -f "sentinelIncrement.sh --pwd $PWD"
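# the sentinel file is a simple counter of remote change notifications: every connection
# to the local socat listener runs "duplexRsync.sh sentinelIncrement", and the main loop
# below compares the counter against the last value it saw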
echo '0' > .____sentinel
#create localsocket to listen for remote changes
socatRes="not listening yet, we get a random port in the following loop";
while [ ! -z "$socatRes" ]
do
randomLocalPort;
socatRes="";
# fork: call this script with a special argument that simply increments the sentinel and exits
socatRes=$(socat TCP-LISTEN:$localPort,fork EXEC:"./duplexRsync.sh sentinelIncrement" 2>&1 &) &
# result should be empty when listen works
done;
echo "listening locally on:$localPort"
# for now we use the same port at both ends; this is a bit sloppy, we should test that the port is not already in use on the remote before the ssh -R call
remotePort=$localPort
# we dump to a remote file the fswatch commands that allow the locally running socat to get a signal of a remote change
# we add the -r switch for every top-level subdirectory except node_modules; this is required because fswatch would still iterate over all subdirs, since the -e switch takes a pattern, not a path
# if you get a bunch of: inotify_add_watch: No space left on device
# you will need to https://github.com/guard/listen/wiki/Increasing-the-amount-of-inotify-watchers
# check your current limit: cat /proc/sys/fs/inotify/max_user_watches
# ATTENTION: you cannot change this kernel param if running in an unprivileged container; you'll need to run this in the hosting kernel's environment
# echo fs.inotify.max_user_watches=524288 | tee -a /etc/sysctl.conf && sysctl -p; echo "increasing the limit of watches, cannot be done in unpriv container"
#echo "$fswatchPath -r -e \"node_modules\" -o . | while read f; do echo 1 | nc localhost $remotePort; done" | ssh $remoteHost "mkdir -p $remoteDir; cd $remoteDir; cat > .____rsyncSignal.sh"
absPath=$(ssh $remoteHost "mkdir -p $remoteDir; cd $remoteDir; pwd")
# we are excluding node_modules and folders starting with .
ssh $remoteHost "mkdir -p $remoteDir; cd $remoteDir; find $absPath -maxdepth 1 -mindepth 1 -type d ! -name \"node_modules\" ! -name \".*\"| awk '{ print \"\\\"\"\$0\"\\\"\"}' | nl | awk -F\\\" '{printf \"/usr/bin/fswatch -x --event Updated --event Created --event Removed --event Renamed --event MovedFrom --event MovedTo -r \\\"%s\\\" | while read f; do echo 1 | nc localhost $remotePort; done \& \n\", \$2, \$1, \$1}' > .____rsyncSignal.sh"
ssh $remoteHost "cd $remoteDir; echo \"/usr/bin/fswatch -x --event Updated --event Created --event Removed --event Renamed --event MovedFrom --event MovedTo -o $absPath | while read f; do echo 1 | nc localhost $remotePort; done\" >> .____rsyncSignal.sh"
# we are excluding node_modules and folders starting with .
# this should work, but there seems to be a bug in fswatch, so we are using multiple processes instead
#ssh $remoteHost "mkdir -p $remoteDir; cd $remoteDir; find $absPath -maxdepth 1 -mindepth 1 -type d ! -name \"node_modules\" ! -name \".*\" | awk '{ print \"\\\"\"\$0\"\\\"\"}' | awk -F\\\" '{printf \" \\\"%s\\\" \", \$2}' | (echo -n \" /usr/bin/fswatch -x --event Updated --event Created --event Removed --event Renamed --event MovedFrom --event MovedTo -r \" && cat) > .____rsyncSignal.sh"
#ssh $remoteHost "cd $remoteDir; echo \" | while read f; do if [ -z \\\"\$skip\\\" ]; then skip=\\\"recursive first msg is spurious\\\"; else echo 1 | nc localhost $remotePort; fi done & /usr/bin/fswatch -o $absPath | while read f; do echo 1 | nc localhost $remotePort; done\" >> .____rsyncSignal.sh"
#exit 1;
function duplex_rsync() {
# kill all remote fswatches, also suppress the kill notice from bash
ssh $remoteHost "pkill -P \$(ps alx | egrep '.*pipe_w.*____rsyncSignal.sh --pwd $PWD --port $remotePort' | awk '{print \$4}' | head -n 1) >/dev/null 2>&1"
# kill the remote fswatch while we sync; the pwd arg prevents killing watches for other folders, and the port prevents killing when 2 local machines watch the exact same path
# note that this discloses the local path to the remote end; we don't think this is serious
ssh $remoteHost "pkill -f '____rsyncSignal.sh --pwd $PWD --port $remotePort'"
# also kill the tunnel
pkill -f "rsyncSignal.sh --pwd $PWD"
# order matters; if we got a remote trigger we'll process remote as src first to prevent restoring files that might have just been deleted
if [ "$trigger" = "remote" ];
then
rsync -auzP --exclude ".*/" --exclude ".____*" --exclude "node_modules" --delete "$remoteHost:$remoteDir/" .;
rsync -auzP --exclude ".*/" --exclude ".____*" --exclude "node_modules" --delete . "$remoteHost:$remoteDir";
else # local as src first
rsync -auzP --exclude ".*/" --exclude ".____*" --exclude "node_modules" --delete . "$remoteHost:$remoteDir";
rsync -auzP --exclude ".*/" --exclude ".____*" --exclude "node_modules" --delete "$remoteHost:$remoteDir/" .;
fi;
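# restart the remote watcher over a reverse ssh tunnel: the nc calls in .____rsyncSignal.sh hit
# localhost:$remotePort on the remote end and are forwarded back to the local socat listener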
ssh -R localhost:$localPort:127.0.0.1:$remotePort $remoteHost "cd $remoteDir; bash .____rsyncSignal.sh --pwd $PWD --port $remotePort"&
#tunnelPid="$!"
# echo "tunnelPid:$tunnelPid"
}
lastSentinel=$(cat .____sentinel);
# we always start from the local dir
trigger=local;
# do a trial run to see if we'd delete files on the remote end
wouldDeleteCount=$(rsync -anuzP --exclude ".*/" --exclude ".____*" --exclude "node_modules" --delete . $remoteHost:$remoteDir/ | grep deleting | wc -l);
wouldDeleteCount="$(echo -e "${wouldDeleteCount}" | tr -d '[:space:]')"
wouldDeleteRemoteFiles=$(rsync -anuzP --exclude ".*/" --exclude ".____*" --exclude "node_modules" --delete . $remoteHost:$remoteDir/ | grep deleting);
if [ ! -z "$wouldDeleteRemoteFiles" ];
then
unset destroyAhead
unset localFileCount
localFileCount=$(find . -type f | egrep -v '\..+/' | egrep -v '\./duplexRsync.sh' | egrep -v '\./\.____*' | wc -l | tr -d '[:space:]')
# if the local directory is empty (using the same exclude pattern as the rsync above) we always merge
if [ "$localFileCount" -eq 0 ]
then
destroyAhead="merge"
else
echo "WOULD delete count: $wouldDeleteCount"
echo "$wouldDeleteRemoteFiles"
fi
while ! [[ "$destroyAhead" =~ ^(destroy|merge|abort)$ ]]
do
if [ "$wouldDeleteCount" -gt 5 ]
then
major=" ----MAJOR----- ";
fi
if [ "$wouldDeleteCount" -gt 42 ]
then
major=" ----INTERSTELLAR BYPASS LEVEL----- ";
fi
echo "ATTENTION $major DESTRUCTION AHEAD: There is/are $wouldDeleteCount file(s) present in the remote folder that are not present locally. Could the remote folder be totally unrelated? Would you like to merge the folders by creeating these locally(merge),Sync and destroy(destroy) or abort?(merge/destroy/abort)"
read destroyAhead
done
if [ "$destroyAhead" = "abort" ];
then
exit;
elif [ "$destroyAhead" = "merge" ];
then
# sync from remote without delete
rsync -auzP --exclude ".*/" --exclude ".____*" --exclude "node_modules" "$remoteHost:$remoteDir/" .;
fi
fi;
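# main loop: do an initial sync, then let a local recursive fswatch drive re-syncs;
# the sentinel counter tells us whether a wakeup came from a remote change (counter
# incremented via socat) or from a genuine local change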
duplex_rsync
fswatch -r -o . | while read f;
do
sentinel=$(cat .____sentinel);
echo "sentinel $sentinel lastSentinel: $lastSentinel"
sentinelInc=$((sentinel-lastSentinel));
# if the change is remote (incremented .____sentinel) let's slow down and wait to gobble up multiple events
if [ $sentinelInc -gt 0 ]
then
echo 'remote change detected';
trigger=remote;
duplex_rsync;
sleep 3;
else
echo 'local change detected';
trigger=local;
duplex_rsync;
fi
lastSentinel=$sentinel;
done;