diff -Nru fsvs-1.1.14/autom4te.cache/output.0 fsvs-1.1.17/autom4te.cache/output.0 --- fsvs-1.1.14/autom4te.cache/output.0 2008-04-02 10:59:38.000000000 +0100 +++ fsvs-1.1.17/autom4te.cache/output.0 2008-10-29 09:03:23.000000000 +0000 @@ -3137,6 +3137,83 @@ fi +{ echo "$as_me:$LINENO: checking for svn_txdelta_apply in -lsvn_delta-1" >&5 +echo $ECHO_N "checking for svn_txdelta_apply in -lsvn_delta-1... $ECHO_C" >&6; } +if test "${ac_cv_lib_svn_delta_1_svn_txdelta_apply+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lsvn_delta-1 $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char svn_txdelta_apply (); +int +main () +{ +return svn_txdelta_apply (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + ac_cv_lib_svn_delta_1_svn_txdelta_apply=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_lib_svn_delta_1_svn_txdelta_apply=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ echo "$as_me:$LINENO: result: $ac_cv_lib_svn_delta_1_svn_txdelta_apply" >&5 +echo "${ECHO_T}$ac_cv_lib_svn_delta_1_svn_txdelta_apply" >&6; } +if test $ac_cv_lib_svn_delta_1_svn_txdelta_apply = yes; then + cat >>confdefs.h <<_ACEOF +@%:@define HAVE_LIBSVN_DELTA_1 1 +_ACEOF + + LIBS="-lsvn_delta-1 $LIBS" + +else + { { echo "$as_me:$LINENO: error: Sorry, can't find subversion. +See \`config.log' for more details." >&5 +echo "$as_me: error: Sorry, can't find subversion. +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } +fi + + { echo "$as_me:$LINENO: checking for svn_ra_initialize in -lsvn_ra-1" >&5 echo $ECHO_N "checking for svn_ra_initialize in -lsvn_ra-1... $ECHO_C" >&6; } if test "${ac_cv_lib_svn_ra_1_svn_ra_initialize+set}" = set; then diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/autom4te.cache/requests /tmp/FWQOjxAJQ0/fsvs-1.1.17/autom4te.cache/requests --- fsvs-1.1.14/autom4te.cache/requests 2008-04-02 10:59:38.000000000 +0100 +++ fsvs-1.1.17/autom4te.cache/requests 2008-10-29 09:03:23.000000000 +0000 @@ -1,4 +1,4 @@ -# This file was generated by Autom4te Wed Mar 19 22:03:23 PDT 2008. +# This file was generated by Autom4te Mon Aug 11 20:59:21 PDT 2008. # It contains the lists of macros which have been traced. # It can be safely removed. 
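The hunk above, mirrored later in the regenerated configure script and driven by a new AC_CHECK_LIB([svn_delta-1], [svn_txdelta_apply], ...) call in configure.in, makes configuration abort with "Sorry, can't find subversion." when the Subversion delta library cannot be linked. The generated test only verifies that the symbol resolves at link time. A minimal stand-alone sketch of the same probe follows; the dummy prototype and the -lsvn_delta-1 flag come from the generated test itself, while the compiler invocation and messages are illustrative assumptions, not part of the patch.

    # Reproduce configure's libsvn_delta-1 link probe by hand (sketch).
    printf '%s\n' \
        'char svn_txdelta_apply ();' \
        'int main () { return svn_txdelta_apply (); }' > conftest.c
    cc conftest.c -lsvn_delta-1 -o conftest \
        && echo "svn_delta-1: yes" \
        || echo "svn_delta-1: no"
    rm -f conftest.c conftest
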
diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/autom4te.cache/traces.0 /tmp/FWQOjxAJQ0/fsvs-1.1.17/autom4te.cache/traces.0 --- fsvs-1.1.14/autom4te.cache/traces.0 2008-04-02 10:59:38.000000000 +0100 +++ fsvs-1.1.17/autom4te.cache/traces.0 2008-10-29 09:03:23.000000000 +0000 @@ -224,353 +224,357 @@ #undef HAVE_LIBAPRUTIL_1]) m4trace:configure.in:101: -1- AC_DEFINE_TRACE_LITERAL([HAVE_LIBAPRUTIL_1]) m4trace:configure.in:101: -1- m4_pattern_allow([^HAVE_LIBAPRUTIL_1$]) -m4trace:configure.in:103: -1- AH_OUTPUT([HAVE_LIBSVN_RA_1], [/* Define to 1 if you have the `svn_ra-1\' library (-lsvn_ra-1). */ +m4trace:configure.in:103: -1- AH_OUTPUT([HAVE_LIBSVN_DELTA_1], [/* Define to 1 if you have the `svn_delta-1\' library (-lsvn_delta-1). */ +#undef HAVE_LIBSVN_DELTA_1]) +m4trace:configure.in:103: -1- AC_DEFINE_TRACE_LITERAL([HAVE_LIBSVN_DELTA_1]) +m4trace:configure.in:103: -1- m4_pattern_allow([^HAVE_LIBSVN_DELTA_1$]) +m4trace:configure.in:105: -1- AH_OUTPUT([HAVE_LIBSVN_RA_1], [/* Define to 1 if you have the `svn_ra-1\' library (-lsvn_ra-1). */ #undef HAVE_LIBSVN_RA_1]) -m4trace:configure.in:103: -1- AC_DEFINE_TRACE_LITERAL([HAVE_LIBSVN_RA_1]) -m4trace:configure.in:103: -1- m4_pattern_allow([^HAVE_LIBSVN_RA_1$]) -m4trace:configure.in:105: -1- AH_OUTPUT([HAVE_LIBGDBM], [/* Define to 1 if you have the `gdbm\' library (-lgdbm). */ +m4trace:configure.in:105: -1- AC_DEFINE_TRACE_LITERAL([HAVE_LIBSVN_RA_1]) +m4trace:configure.in:105: -1- m4_pattern_allow([^HAVE_LIBSVN_RA_1$]) +m4trace:configure.in:107: -1- AH_OUTPUT([HAVE_LIBGDBM], [/* Define to 1 if you have the `gdbm\' library (-lgdbm). */ #undef HAVE_LIBGDBM]) -m4trace:configure.in:105: -1- AC_DEFINE_TRACE_LITERAL([HAVE_LIBGDBM]) -m4trace:configure.in:105: -1- m4_pattern_allow([^HAVE_LIBGDBM$]) -m4trace:configure.in:109: -1- AC_SUBST([GREP]) -m4trace:configure.in:109: -1- AC_SUBST_TRACE([GREP]) -m4trace:configure.in:109: -1- m4_pattern_allow([^GREP$]) -m4trace:configure.in:109: -1- AC_SUBST([GREP]) -m4trace:configure.in:109: -1- AC_SUBST_TRACE([GREP]) -m4trace:configure.in:109: -1- m4_pattern_allow([^GREP$]) -m4trace:configure.in:109: -1- AC_SUBST([EGREP]) -m4trace:configure.in:109: -1- AC_SUBST_TRACE([EGREP]) -m4trace:configure.in:109: -1- m4_pattern_allow([^EGREP$]) -m4trace:configure.in:109: -1- AC_SUBST([EGREP]) -m4trace:configure.in:109: -1- AC_SUBST_TRACE([EGREP]) -m4trace:configure.in:109: -1- m4_pattern_allow([^EGREP$]) -m4trace:configure.in:109: -1- AC_DEFINE_TRACE_LITERAL([STDC_HEADERS]) -m4trace:configure.in:109: -1- m4_pattern_allow([^STDC_HEADERS$]) -m4trace:configure.in:109: -1- AH_OUTPUT([STDC_HEADERS], [/* Define to 1 if you have the ANSI C header files. 
*/ +m4trace:configure.in:107: -1- AC_DEFINE_TRACE_LITERAL([HAVE_LIBGDBM]) +m4trace:configure.in:107: -1- m4_pattern_allow([^HAVE_LIBGDBM$]) +m4trace:configure.in:111: -1- AC_SUBST([GREP]) +m4trace:configure.in:111: -1- AC_SUBST_TRACE([GREP]) +m4trace:configure.in:111: -1- m4_pattern_allow([^GREP$]) +m4trace:configure.in:111: -1- AC_SUBST([GREP]) +m4trace:configure.in:111: -1- AC_SUBST_TRACE([GREP]) +m4trace:configure.in:111: -1- m4_pattern_allow([^GREP$]) +m4trace:configure.in:111: -1- AC_SUBST([EGREP]) +m4trace:configure.in:111: -1- AC_SUBST_TRACE([EGREP]) +m4trace:configure.in:111: -1- m4_pattern_allow([^EGREP$]) +m4trace:configure.in:111: -1- AC_SUBST([EGREP]) +m4trace:configure.in:111: -1- AC_SUBST_TRACE([EGREP]) +m4trace:configure.in:111: -1- m4_pattern_allow([^EGREP$]) +m4trace:configure.in:111: -1- AC_DEFINE_TRACE_LITERAL([STDC_HEADERS]) +m4trace:configure.in:111: -1- m4_pattern_allow([^STDC_HEADERS$]) +m4trace:configure.in:111: -1- AH_OUTPUT([STDC_HEADERS], [/* Define to 1 if you have the ANSI C header files. */ #undef STDC_HEADERS]) -m4trace:configure.in:110: -1- AH_OUTPUT([HAVE_FCNTL_H], [/* Define to 1 if you have the header file. */ +m4trace:configure.in:112: -1- AH_OUTPUT([HAVE_FCNTL_H], [/* Define to 1 if you have the header file. */ #undef HAVE_FCNTL_H]) -m4trace:configure.in:110: -1- AH_OUTPUT([HAVE_STDDEF_H], [/* Define to 1 if you have the header file. */ +m4trace:configure.in:112: -1- AH_OUTPUT([HAVE_STDDEF_H], [/* Define to 1 if you have the header file. */ #undef HAVE_STDDEF_H]) -m4trace:configure.in:110: -1- AH_OUTPUT([HAVE_STDLIB_H], [/* Define to 1 if you have the header file. */ +m4trace:configure.in:112: -1- AH_OUTPUT([HAVE_STDLIB_H], [/* Define to 1 if you have the header file. */ #undef HAVE_STDLIB_H]) -m4trace:configure.in:110: -1- AH_OUTPUT([HAVE_STRING_H], [/* Define to 1 if you have the header file. */ +m4trace:configure.in:112: -1- AH_OUTPUT([HAVE_STRING_H], [/* Define to 1 if you have the header file. */ #undef HAVE_STRING_H]) -m4trace:configure.in:110: -1- AH_OUTPUT([HAVE_SYS_TIME_H], [/* Define to 1 if you have the header file. */ +m4trace:configure.in:112: -1- AH_OUTPUT([HAVE_SYS_TIME_H], [/* Define to 1 if you have the header file. */ #undef HAVE_SYS_TIME_H]) -m4trace:configure.in:110: -1- AH_OUTPUT([HAVE_UNISTD_H], [/* Define to 1 if you have the header file. */ +m4trace:configure.in:112: -1- AH_OUTPUT([HAVE_UNISTD_H], [/* Define to 1 if you have the header file. */ #undef HAVE_UNISTD_H]) -m4trace:configure.in:110: -1- AH_OUTPUT([HAVE_PCRE_H], [/* Define to 1 if you have the header file. */ +m4trace:configure.in:112: -1- AH_OUTPUT([HAVE_PCRE_H], [/* Define to 1 if you have the header file. */ #undef HAVE_PCRE_H]) -m4trace:configure.in:110: -1- AH_OUTPUT([HAVE_SYS_TYPES_H], [/* Define to 1 if you have the header file. */ +m4trace:configure.in:112: -1- AH_OUTPUT([HAVE_SYS_TYPES_H], [/* Define to 1 if you have the header file. */ #undef HAVE_SYS_TYPES_H]) -m4trace:configure.in:110: -1- AH_OUTPUT([HAVE_SYS_STAT_H], [/* Define to 1 if you have the header file. */ +m4trace:configure.in:112: -1- AH_OUTPUT([HAVE_SYS_STAT_H], [/* Define to 1 if you have the header file. */ #undef HAVE_SYS_STAT_H]) -m4trace:configure.in:110: -1- AH_OUTPUT([HAVE_STDLIB_H], [/* Define to 1 if you have the header file. */ +m4trace:configure.in:112: -1- AH_OUTPUT([HAVE_STDLIB_H], [/* Define to 1 if you have the header file. */ #undef HAVE_STDLIB_H]) -m4trace:configure.in:110: -1- AH_OUTPUT([HAVE_STRING_H], [/* Define to 1 if you have the header file. 
*/ +m4trace:configure.in:112: -1- AH_OUTPUT([HAVE_STRING_H], [/* Define to 1 if you have the header file. */ #undef HAVE_STRING_H]) -m4trace:configure.in:110: -1- AH_OUTPUT([HAVE_MEMORY_H], [/* Define to 1 if you have the header file. */ +m4trace:configure.in:112: -1- AH_OUTPUT([HAVE_MEMORY_H], [/* Define to 1 if you have the header file. */ #undef HAVE_MEMORY_H]) -m4trace:configure.in:110: -1- AH_OUTPUT([HAVE_STRINGS_H], [/* Define to 1 if you have the header file. */ +m4trace:configure.in:112: -1- AH_OUTPUT([HAVE_STRINGS_H], [/* Define to 1 if you have the header file. */ #undef HAVE_STRINGS_H]) -m4trace:configure.in:110: -1- AH_OUTPUT([HAVE_INTTYPES_H], [/* Define to 1 if you have the header file. */ +m4trace:configure.in:112: -1- AH_OUTPUT([HAVE_INTTYPES_H], [/* Define to 1 if you have the header file. */ #undef HAVE_INTTYPES_H]) -m4trace:configure.in:110: -1- AH_OUTPUT([HAVE_STDINT_H], [/* Define to 1 if you have the header file. */ +m4trace:configure.in:112: -1- AH_OUTPUT([HAVE_STDINT_H], [/* Define to 1 if you have the header file. */ #undef HAVE_STDINT_H]) -m4trace:configure.in:110: -1- AH_OUTPUT([HAVE_UNISTD_H], [/* Define to 1 if you have the header file. */ +m4trace:configure.in:112: -1- AH_OUTPUT([HAVE_UNISTD_H], [/* Define to 1 if you have the header file. */ #undef HAVE_UNISTD_H]) -m4trace:configure.in:115: -1- AH_OUTPUT([HAVE_DIRENT_H], [/* Define to 1 if you have the header file, and it defines `DIR\'. +m4trace:configure.in:117: -1- AH_OUTPUT([HAVE_DIRENT_H], [/* Define to 1 if you have the header file, and it defines `DIR\'. */ #undef HAVE_DIRENT_H]) -m4trace:configure.in:115: -1- AH_OUTPUT([HAVE_SYS_NDIR_H], [/* Define to 1 if you have the header file, and it defines `DIR\'. +m4trace:configure.in:117: -1- AH_OUTPUT([HAVE_SYS_NDIR_H], [/* Define to 1 if you have the header file, and it defines `DIR\'. */ #undef HAVE_SYS_NDIR_H]) -m4trace:configure.in:115: -1- AH_OUTPUT([HAVE_SYS_DIR_H], [/* Define to 1 if you have the header file, and it defines `DIR\'. +m4trace:configure.in:117: -1- AH_OUTPUT([HAVE_SYS_DIR_H], [/* Define to 1 if you have the header file, and it defines `DIR\'. */ #undef HAVE_SYS_DIR_H]) -m4trace:configure.in:115: -1- AH_OUTPUT([HAVE_NDIR_H], [/* Define to 1 if you have the header file, and it defines `DIR\'. */ +m4trace:configure.in:117: -1- AH_OUTPUT([HAVE_NDIR_H], [/* Define to 1 if you have the header file, and it defines `DIR\'. */ #undef HAVE_NDIR_H]) -m4trace:configure.in:117: -1- AC_DEFINE_TRACE_LITERAL([HAVE_STRUCT_STAT_ST_MTIM]) -m4trace:configure.in:117: -1- m4_pattern_allow([^HAVE_STRUCT_STAT_ST_MTIM$]) -m4trace:configure.in:117: -1- AH_OUTPUT([HAVE_STRUCT_STAT_ST_MTIM], [/* Define to 1 if `st_mtim\' is member of `struct stat\'. */ +m4trace:configure.in:119: -1- AC_DEFINE_TRACE_LITERAL([HAVE_STRUCT_STAT_ST_MTIM]) +m4trace:configure.in:119: -1- m4_pattern_allow([^HAVE_STRUCT_STAT_ST_MTIM$]) +m4trace:configure.in:119: -1- AH_OUTPUT([HAVE_STRUCT_STAT_ST_MTIM], [/* Define to 1 if `st_mtim\' is member of `struct stat\'. 
*/ #undef HAVE_STRUCT_STAT_ST_MTIM]) -m4trace:configure.in:125: -1- AC_DEFINE_TRACE_LITERAL([HAVE_VALGRIND]) -m4trace:configure.in:125: -1- m4_pattern_allow([^HAVE_VALGRIND$]) -m4trace:configure.in:125: -1- AH_OUTPUT([HAVE_VALGRIND], [/* compatible valgrind version found */ +m4trace:configure.in:127: -1- AC_DEFINE_TRACE_LITERAL([HAVE_VALGRIND]) +m4trace:configure.in:127: -1- m4_pattern_allow([^HAVE_VALGRIND$]) +m4trace:configure.in:127: -1- AH_OUTPUT([HAVE_VALGRIND], [/* compatible valgrind version found */ #undef HAVE_VALGRIND]) -m4trace:configure.in:130: -1- AH_OUTPUT([HAVE_LINUX_KDEV_T_H], [/* Define to 1 if you have the header file. */ +m4trace:configure.in:132: -1- AH_OUTPUT([HAVE_LINUX_KDEV_T_H], [/* Define to 1 if you have the header file. */ #undef HAVE_LINUX_KDEV_T_H]) -m4trace:configure.in:133: -2- _m4_warn([obsolete], [The macro `AC_HELP_STRING' is obsolete. +m4trace:configure.in:135: -2- _m4_warn([obsolete], [The macro `AC_HELP_STRING' is obsolete. You should run autoupdate.], [../../lib/autoconf/general.m4:209: AC_HELP_STRING is expanded from... -configure.in:133: the top level]) -m4trace:configure.in:132: -1- AC_DEFINE_TRACE_LITERAL([ENABLE_DEV_FAKE]) -m4trace:configure.in:132: -1- m4_pattern_allow([^ENABLE_DEV_FAKE$]) -m4trace:configure.in:138: -1- AC_SUBST([ENABLE_DEV_FAKE]) -m4trace:configure.in:138: -1- AC_SUBST_TRACE([ENABLE_DEV_FAKE]) -m4trace:configure.in:138: -1- m4_pattern_allow([^ENABLE_DEV_FAKE$]) -m4trace:configure.in:142: -2- _m4_warn([obsolete], [The macro `AC_HELP_STRING' is obsolete. +configure.in:135: the top level]) +m4trace:configure.in:134: -1- AC_DEFINE_TRACE_LITERAL([ENABLE_DEV_FAKE]) +m4trace:configure.in:134: -1- m4_pattern_allow([^ENABLE_DEV_FAKE$]) +m4trace:configure.in:140: -1- AC_SUBST([ENABLE_DEV_FAKE]) +m4trace:configure.in:140: -1- AC_SUBST_TRACE([ENABLE_DEV_FAKE]) +m4trace:configure.in:140: -1- m4_pattern_allow([^ENABLE_DEV_FAKE$]) +m4trace:configure.in:144: -2- _m4_warn([obsolete], [The macro `AC_HELP_STRING' is obsolete. You should run autoupdate.], [../../lib/autoconf/general.m4:209: AC_HELP_STRING is expanded from... -configure.in:142: the top level]) -m4trace:configure.in:141: -1- AC_DEFINE_TRACE_LITERAL([ENABLE_DEBUG]) -m4trace:configure.in:141: -1- m4_pattern_allow([^ENABLE_DEBUG$]) -m4trace:configure.in:147: -1- AC_SUBST([ENABLE_DEBUG]) -m4trace:configure.in:147: -1- AC_SUBST_TRACE([ENABLE_DEBUG]) -m4trace:configure.in:147: -1- m4_pattern_allow([^ENABLE_DEBUG$]) -m4trace:configure.in:151: -2- _m4_warn([obsolete], [The macro `AC_HELP_STRING' is obsolete. +configure.in:144: the top level]) +m4trace:configure.in:143: -1- AC_DEFINE_TRACE_LITERAL([ENABLE_DEBUG]) +m4trace:configure.in:143: -1- m4_pattern_allow([^ENABLE_DEBUG$]) +m4trace:configure.in:149: -1- AC_SUBST([ENABLE_DEBUG]) +m4trace:configure.in:149: -1- AC_SUBST_TRACE([ENABLE_DEBUG]) +m4trace:configure.in:149: -1- m4_pattern_allow([^ENABLE_DEBUG$]) +m4trace:configure.in:153: -2- _m4_warn([obsolete], [The macro `AC_HELP_STRING' is obsolete. You should run autoupdate.], [../../lib/autoconf/general.m4:209: AC_HELP_STRING is expanded from... 
-configure.in:151: the top level]) -m4trace:configure.in:150: -1- AC_DEFINE_TRACE_LITERAL([ENABLE_GCOV]) -m4trace:configure.in:150: -1- m4_pattern_allow([^ENABLE_GCOV$]) -m4trace:configure.in:156: -1- AC_DEFINE_TRACE_LITERAL([ENABLE_GCOV]) -m4trace:configure.in:156: -1- m4_pattern_allow([^ENABLE_GCOV$]) -m4trace:configure.in:157: -1- AC_SUBST([ENABLE_GCOV]) -m4trace:configure.in:157: -1- AC_SUBST_TRACE([ENABLE_GCOV]) -m4trace:configure.in:157: -1- m4_pattern_allow([^ENABLE_GCOV$]) -m4trace:configure.in:165: -1- AC_DEFINE_TRACE_LITERAL([HAVE_O_DIRECTORY]) -m4trace:configure.in:165: -1- m4_pattern_allow([^HAVE_O_DIRECTORY$]) -m4trace:configure.in:165: -1- AH_OUTPUT([HAVE_O_DIRECTORY], [/* O_DIRECTORY found */ -#undef HAVE_O_DIRECTORY]) -m4trace:configure.in:167: -1- AC_SUBST([HAVE_O_DIRECTORY]) -m4trace:configure.in:167: -1- AC_SUBST_TRACE([HAVE_O_DIRECTORY]) +configure.in:153: the top level]) +m4trace:configure.in:152: -1- AC_DEFINE_TRACE_LITERAL([ENABLE_GCOV]) +m4trace:configure.in:152: -1- m4_pattern_allow([^ENABLE_GCOV$]) +m4trace:configure.in:158: -1- AC_DEFINE_TRACE_LITERAL([ENABLE_GCOV]) +m4trace:configure.in:158: -1- m4_pattern_allow([^ENABLE_GCOV$]) +m4trace:configure.in:159: -1- AC_SUBST([ENABLE_GCOV]) +m4trace:configure.in:159: -1- AC_SUBST_TRACE([ENABLE_GCOV]) +m4trace:configure.in:159: -1- m4_pattern_allow([^ENABLE_GCOV$]) +m4trace:configure.in:167: -1- AC_DEFINE_TRACE_LITERAL([HAVE_O_DIRECTORY]) m4trace:configure.in:167: -1- m4_pattern_allow([^HAVE_O_DIRECTORY$]) -m4trace:configure.in:172: -1- AC_DEFINE_TRACE_LITERAL([HAVE_LOCALES]) -m4trace:configure.in:172: -1- m4_pattern_allow([^HAVE_LOCALES$]) -m4trace:configure.in:174: -1- AC_SUBST([HAVE_LOCALES]) -m4trace:configure.in:174: -1- AC_SUBST_TRACE([HAVE_LOCALES]) +m4trace:configure.in:167: -1- AH_OUTPUT([HAVE_O_DIRECTORY], [/* O_DIRECTORY found */ +#undef HAVE_O_DIRECTORY]) +m4trace:configure.in:169: -1- AC_SUBST([HAVE_O_DIRECTORY]) +m4trace:configure.in:169: -1- AC_SUBST_TRACE([HAVE_O_DIRECTORY]) +m4trace:configure.in:169: -1- m4_pattern_allow([^HAVE_O_DIRECTORY$]) +m4trace:configure.in:174: -1- AC_DEFINE_TRACE_LITERAL([HAVE_LOCALES]) m4trace:configure.in:174: -1- m4_pattern_allow([^HAVE_LOCALES$]) -m4trace:configure.in:178: -2- _m4_warn([obsolete], [The macro `AC_HELP_STRING' is obsolete. +m4trace:configure.in:176: -1- AC_SUBST([HAVE_LOCALES]) +m4trace:configure.in:176: -1- AC_SUBST_TRACE([HAVE_LOCALES]) +m4trace:configure.in:176: -1- m4_pattern_allow([^HAVE_LOCALES$]) +m4trace:configure.in:180: -2- _m4_warn([obsolete], [The macro `AC_HELP_STRING' is obsolete. You should run autoupdate.], [../../lib/autoconf/general.m4:209: AC_HELP_STRING is expanded from... -configure.in:178: the top level]) -m4trace:configure.in:177: -1- AC_DEFINE_TRACE_LITERAL([CHROOTER_JAIL]) -m4trace:configure.in:177: -1- m4_pattern_allow([^CHROOTER_JAIL$]) -m4trace:configure.in:177: -1- AH_OUTPUT([CHROOTER_JAIL], [/* The path of a chroot jail. */ +configure.in:180: the top level]) +m4trace:configure.in:179: -1- AC_DEFINE_TRACE_LITERAL([CHROOTER_JAIL]) +m4trace:configure.in:179: -1- m4_pattern_allow([^CHROOTER_JAIL$]) +m4trace:configure.in:179: -1- AH_OUTPUT([CHROOTER_JAIL], [/* The path of a chroot jail. */ #undef CHROOTER_JAIL]) -m4trace:configure.in:190: -1- AC_SUBST([CHROOTER_JAIL]) -m4trace:configure.in:190: -1- AC_SUBST_TRACE([CHROOTER_JAIL]) -m4trace:configure.in:190: -1- m4_pattern_allow([^CHROOTER_JAIL$]) -m4trace:configure.in:194: -2- _m4_warn([obsolete], [The macro `AC_HELP_STRING' is obsolete. 
+m4trace:configure.in:192: -1- AC_SUBST([CHROOTER_JAIL]) +m4trace:configure.in:192: -1- AC_SUBST_TRACE([CHROOTER_JAIL]) +m4trace:configure.in:192: -1- m4_pattern_allow([^CHROOTER_JAIL$]) +m4trace:configure.in:196: -2- _m4_warn([obsolete], [The macro `AC_HELP_STRING' is obsolete. You should run autoupdate.], [../../lib/autoconf/general.m4:209: AC_HELP_STRING is expanded from... -configure.in:194: the top level]) -m4trace:configure.in:193: -1- AC_DEFINE_TRACE_LITERAL([ENABLE_RELEASE]) -m4trace:configure.in:193: -1- m4_pattern_allow([^ENABLE_RELEASE$]) -m4trace:configure.in:199: -1- AC_SUBST([ENABLE_RELEASE]) -m4trace:configure.in:199: -1- AC_SUBST_TRACE([ENABLE_RELEASE]) -m4trace:configure.in:199: -1- m4_pattern_allow([^ENABLE_RELEASE$]) -m4trace:configure.in:207: -1- AH_OUTPUT([HAVE_GETDENTS64], [/* Define to 1 if you have the `getdents64\' function. */ +configure.in:196: the top level]) +m4trace:configure.in:195: -1- AC_DEFINE_TRACE_LITERAL([ENABLE_RELEASE]) +m4trace:configure.in:195: -1- m4_pattern_allow([^ENABLE_RELEASE$]) +m4trace:configure.in:201: -1- AC_SUBST([ENABLE_RELEASE]) +m4trace:configure.in:201: -1- AC_SUBST_TRACE([ENABLE_RELEASE]) +m4trace:configure.in:201: -1- m4_pattern_allow([^ENABLE_RELEASE$]) +m4trace:configure.in:209: -1- AH_OUTPUT([HAVE_GETDENTS64], [/* Define to 1 if you have the `getdents64\' function. */ #undef HAVE_GETDENTS64]) -m4trace:configure.in:208: -1- AH_OUTPUT([HAVE_LINUX_TYPES_H], [/* Define to 1 if you have the header file. */ +m4trace:configure.in:210: -1- AH_OUTPUT([HAVE_LINUX_TYPES_H], [/* Define to 1 if you have the header file. */ #undef HAVE_LINUX_TYPES_H]) -m4trace:configure.in:209: -1- AH_OUTPUT([HAVE_LINUX_UNISTD_H], [/* Define to 1 if you have the header file. */ +m4trace:configure.in:211: -1- AH_OUTPUT([HAVE_LINUX_UNISTD_H], [/* Define to 1 if you have the header file. */ #undef HAVE_LINUX_UNISTD_H]) -m4trace:configure.in:210: -1- AC_DEFINE_TRACE_LITERAL([HAVE_COMPARISON_FN_T]) -m4trace:configure.in:210: -1- m4_pattern_allow([^HAVE_COMPARISON_FN_T$]) -m4trace:configure.in:210: -1- AH_OUTPUT([HAVE_COMPARISON_FN_T], [/* Define to 1 if the system has the type `comparison_fn_t\'. */ +m4trace:configure.in:212: -1- AC_DEFINE_TRACE_LITERAL([HAVE_COMPARISON_FN_T]) +m4trace:configure.in:212: -1- m4_pattern_allow([^HAVE_COMPARISON_FN_T$]) +m4trace:configure.in:212: -1- AH_OUTPUT([HAVE_COMPARISON_FN_T], [/* Define to 1 if the system has the type `comparison_fn_t\'. */ #undef HAVE_COMPARISON_FN_T]) -m4trace:configure.in:212: -1- AC_DEFINE_TRACE_LITERAL([_FILE_OFFSET_BITS]) -m4trace:configure.in:212: -1- m4_pattern_allow([^_FILE_OFFSET_BITS$]) -m4trace:configure.in:212: -1- AH_OUTPUT([_FILE_OFFSET_BITS], [/* Number of bits in a file offset, on hosts where this is settable. */ +m4trace:configure.in:214: -1- AC_DEFINE_TRACE_LITERAL([_FILE_OFFSET_BITS]) +m4trace:configure.in:214: -1- m4_pattern_allow([^_FILE_OFFSET_BITS$]) +m4trace:configure.in:214: -1- AH_OUTPUT([_FILE_OFFSET_BITS], [/* Number of bits in a file offset, on hosts where this is settable. */ #undef _FILE_OFFSET_BITS]) -m4trace:configure.in:212: -1- AC_DEFINE_TRACE_LITERAL([_LARGE_FILES]) -m4trace:configure.in:212: -1- m4_pattern_allow([^_LARGE_FILES$]) -m4trace:configure.in:212: -1- AH_OUTPUT([_LARGE_FILES], [/* Define for large files, on AIX-style hosts. */ +m4trace:configure.in:214: -1- AC_DEFINE_TRACE_LITERAL([_LARGE_FILES]) +m4trace:configure.in:214: -1- m4_pattern_allow([^_LARGE_FILES$]) +m4trace:configure.in:214: -1- AH_OUTPUT([_LARGE_FILES], [/* Define for large files, on AIX-style hosts. 
*/ #undef _LARGE_FILES]) -m4trace:configure.in:215: -1- AC_DEFINE_TRACE_LITERAL([const]) -m4trace:configure.in:215: -1- m4_pattern_allow([^const$]) -m4trace:configure.in:215: -1- AH_OUTPUT([const], [/* Define to empty if `const\' does not conform to ANSI C. */ +m4trace:configure.in:217: -1- AC_DEFINE_TRACE_LITERAL([const]) +m4trace:configure.in:217: -1- m4_pattern_allow([^const$]) +m4trace:configure.in:217: -1- AH_OUTPUT([const], [/* Define to empty if `const\' does not conform to ANSI C. */ #undef const]) -m4trace:configure.in:216: -1- AH_OUTPUT([inline], [/* Define to `__inline__\' or `__inline\' if that\'s what the C compiler +m4trace:configure.in:218: -1- AH_OUTPUT([inline], [/* Define to `__inline__\' or `__inline\' if that\'s what the C compiler calls it, or to nothing if \'inline\' is not supported under any name. */ #ifndef __cplusplus #undef inline #endif]) -m4trace:configure.in:217: -1- AC_DEFINE_TRACE_LITERAL([HAVE_STRUCT_STAT_ST_RDEV]) -m4trace:configure.in:217: -1- m4_pattern_allow([^HAVE_STRUCT_STAT_ST_RDEV$]) -m4trace:configure.in:217: -1- AH_OUTPUT([HAVE_STRUCT_STAT_ST_RDEV], [/* Define to 1 if `st_rdev\' is member of `struct stat\'. */ +m4trace:configure.in:219: -1- AC_DEFINE_TRACE_LITERAL([HAVE_STRUCT_STAT_ST_RDEV]) +m4trace:configure.in:219: -1- m4_pattern_allow([^HAVE_STRUCT_STAT_ST_RDEV$]) +m4trace:configure.in:219: -1- AH_OUTPUT([HAVE_STRUCT_STAT_ST_RDEV], [/* Define to 1 if `st_rdev\' is member of `struct stat\'. */ #undef HAVE_STRUCT_STAT_ST_RDEV]) -m4trace:configure.in:218: -1- AC_DEFINE_TRACE_LITERAL([TIME_WITH_SYS_TIME]) -m4trace:configure.in:218: -1- m4_pattern_allow([^TIME_WITH_SYS_TIME$]) -m4trace:configure.in:218: -1- AH_OUTPUT([TIME_WITH_SYS_TIME], [/* Define to 1 if you can safely include both and . */ +m4trace:configure.in:220: -1- AC_DEFINE_TRACE_LITERAL([TIME_WITH_SYS_TIME]) +m4trace:configure.in:220: -1- m4_pattern_allow([^TIME_WITH_SYS_TIME$]) +m4trace:configure.in:220: -1- AH_OUTPUT([TIME_WITH_SYS_TIME], [/* Define to 1 if you can safely include both and . */ #undef TIME_WITH_SYS_TIME]) -m4trace:configure.in:219: -1- AC_DEFINE_TRACE_LITERAL([TM_IN_SYS_TIME]) -m4trace:configure.in:219: -1- m4_pattern_allow([^TM_IN_SYS_TIME$]) -m4trace:configure.in:219: -1- AH_OUTPUT([TM_IN_SYS_TIME], [/* Define to 1 if your declares `struct tm\'. */ +m4trace:configure.in:221: -1- AC_DEFINE_TRACE_LITERAL([TM_IN_SYS_TIME]) +m4trace:configure.in:221: -1- m4_pattern_allow([^TM_IN_SYS_TIME$]) +m4trace:configure.in:221: -1- AH_OUTPUT([TM_IN_SYS_TIME], [/* Define to 1 if your declares `struct tm\'. 
*/ #undef TM_IN_SYS_TIME]) -m4trace:configure.in:221: -1- AC_DEFINE_TRACE_LITERAL([HAS_FASTCALL]) -m4trace:configure.in:221: -1- m4_pattern_allow([^HAS_FASTCALL$]) -m4trace:configure.in:222: -1- AC_SUBST([HAS_FASTCALL]) -m4trace:configure.in:222: -1- AC_SUBST_TRACE([HAS_FASTCALL]) -m4trace:configure.in:222: -1- m4_pattern_allow([^HAS_FASTCALL$]) -m4trace:configure.in:230: -1- AC_DEFINE_TRACE_LITERAL([_UINT32_T]) -m4trace:configure.in:230: -1- m4_pattern_allow([^_UINT32_T$]) -m4trace:configure.in:230: -1- AH_OUTPUT([_UINT32_T], [/* Define for Solaris 2.5.1 so the uint32_t typedef from , +m4trace:configure.in:223: -1- AC_DEFINE_TRACE_LITERAL([HAS_FASTCALL]) +m4trace:configure.in:223: -1- m4_pattern_allow([^HAS_FASTCALL$]) +m4trace:configure.in:224: -1- AC_SUBST([HAS_FASTCALL]) +m4trace:configure.in:224: -1- AC_SUBST_TRACE([HAS_FASTCALL]) +m4trace:configure.in:224: -1- m4_pattern_allow([^HAS_FASTCALL$]) +m4trace:configure.in:232: -1- AC_DEFINE_TRACE_LITERAL([_UINT32_T]) +m4trace:configure.in:232: -1- m4_pattern_allow([^_UINT32_T$]) +m4trace:configure.in:232: -1- AH_OUTPUT([_UINT32_T], [/* Define for Solaris 2.5.1 so the uint32_t typedef from , , or is not used. If the typedef was allowed, the #define below would cause a syntax error. */ #undef _UINT32_T]) -m4trace:configure.in:230: -1- AC_DEFINE_TRACE_LITERAL([uint32_t]) -m4trace:configure.in:230: -1- m4_pattern_allow([^uint32_t$]) -m4trace:configure.in:230: -1- AH_OUTPUT([uint32_t], [/* Define to the type of an unsigned integer type of width exactly 32 bits if +m4trace:configure.in:232: -1- AC_DEFINE_TRACE_LITERAL([uint32_t]) +m4trace:configure.in:232: -1- m4_pattern_allow([^uint32_t$]) +m4trace:configure.in:232: -1- AH_OUTPUT([uint32_t], [/* Define to the type of an unsigned integer type of width exactly 32 bits if such a type exists and the standard includes do not define it. */ #undef uint32_t]) -m4trace:configure.in:231: -1- AC_SUBST([HAVE_UINT32_T]) -m4trace:configure.in:231: -1- AC_SUBST_TRACE([HAVE_UINT32_T]) -m4trace:configure.in:231: -1- m4_pattern_allow([^HAVE_UINT32_T$]) -m4trace:configure.in:237: -1- AC_DEFINE_TRACE_LITERAL([AC_CV_C_UINT32_T]) -m4trace:configure.in:237: -1- m4_pattern_allow([^AC_CV_C_UINT32_T$]) -m4trace:configure.in:239: -1- AC_DEFINE_TRACE_LITERAL([_UINT64_T]) -m4trace:configure.in:239: -1- m4_pattern_allow([^_UINT64_T$]) -m4trace:configure.in:239: -1- AH_OUTPUT([_UINT64_T], [/* Define for Solaris 2.5.1 so the uint64_t typedef from , +m4trace:configure.in:233: -1- AC_SUBST([HAVE_UINT32_T]) +m4trace:configure.in:233: -1- AC_SUBST_TRACE([HAVE_UINT32_T]) +m4trace:configure.in:233: -1- m4_pattern_allow([^HAVE_UINT32_T$]) +m4trace:configure.in:239: -1- AC_DEFINE_TRACE_LITERAL([AC_CV_C_UINT32_T]) +m4trace:configure.in:239: -1- m4_pattern_allow([^AC_CV_C_UINT32_T$]) +m4trace:configure.in:241: -1- AC_DEFINE_TRACE_LITERAL([_UINT64_T]) +m4trace:configure.in:241: -1- m4_pattern_allow([^_UINT64_T$]) +m4trace:configure.in:241: -1- AH_OUTPUT([_UINT64_T], [/* Define for Solaris 2.5.1 so the uint64_t typedef from , , or is not used. If the typedef was allowed, the #define below would cause a syntax error. 
*/ #undef _UINT64_T]) -m4trace:configure.in:239: -1- AC_DEFINE_TRACE_LITERAL([uint64_t]) -m4trace:configure.in:239: -1- m4_pattern_allow([^uint64_t$]) -m4trace:configure.in:239: -1- AH_OUTPUT([uint64_t], [/* Define to the type of an unsigned integer type of width exactly 64 bits if +m4trace:configure.in:241: -1- AC_DEFINE_TRACE_LITERAL([uint64_t]) +m4trace:configure.in:241: -1- m4_pattern_allow([^uint64_t$]) +m4trace:configure.in:241: -1- AH_OUTPUT([uint64_t], [/* Define to the type of an unsigned integer type of width exactly 64 bits if such a type exists and the standard includes do not define it. */ #undef uint64_t]) -m4trace:configure.in:240: -1- AC_SUBST([HAVE_UINT64_T]) -m4trace:configure.in:240: -1- AC_SUBST_TRACE([HAVE_UINT64_T]) -m4trace:configure.in:240: -1- m4_pattern_allow([^HAVE_UINT64_T$]) -m4trace:configure.in:245: -1- AC_DEFINE_TRACE_LITERAL([AC_CV_C_UINT64_T]) -m4trace:configure.in:245: -1- m4_pattern_allow([^AC_CV_C_UINT64_T$]) -m4trace:configure.in:249: -1- AC_DEFINE_TRACE_LITERAL([uid_t]) -m4trace:configure.in:249: -1- m4_pattern_allow([^uid_t$]) -m4trace:configure.in:249: -1- AH_OUTPUT([uid_t], [/* Define to `int\' if doesn\'t define. */ +m4trace:configure.in:242: -1- AC_SUBST([HAVE_UINT64_T]) +m4trace:configure.in:242: -1- AC_SUBST_TRACE([HAVE_UINT64_T]) +m4trace:configure.in:242: -1- m4_pattern_allow([^HAVE_UINT64_T$]) +m4trace:configure.in:247: -1- AC_DEFINE_TRACE_LITERAL([AC_CV_C_UINT64_T]) +m4trace:configure.in:247: -1- m4_pattern_allow([^AC_CV_C_UINT64_T$]) +m4trace:configure.in:251: -1- AC_DEFINE_TRACE_LITERAL([uid_t]) +m4trace:configure.in:251: -1- m4_pattern_allow([^uid_t$]) +m4trace:configure.in:251: -1- AH_OUTPUT([uid_t], [/* Define to `int\' if doesn\'t define. */ #undef uid_t]) -m4trace:configure.in:249: -1- AC_DEFINE_TRACE_LITERAL([gid_t]) -m4trace:configure.in:249: -1- m4_pattern_allow([^gid_t$]) -m4trace:configure.in:249: -1- AH_OUTPUT([gid_t], [/* Define to `int\' if doesn\'t define. */ +m4trace:configure.in:251: -1- AC_DEFINE_TRACE_LITERAL([gid_t]) +m4trace:configure.in:251: -1- m4_pattern_allow([^gid_t$]) +m4trace:configure.in:251: -1- AH_OUTPUT([gid_t], [/* Define to `int\' if doesn\'t define. */ #undef gid_t]) -m4trace:configure.in:249: -1- AH_OUTPUT([HAVE_UNISTD_H], [/* Define to 1 if you have the header file. */ +m4trace:configure.in:251: -1- AH_OUTPUT([HAVE_UNISTD_H], [/* Define to 1 if you have the header file. */ #undef HAVE_UNISTD_H]) -m4trace:configure.in:249: -1- AC_DEFINE_TRACE_LITERAL([HAVE_CHOWN]) -m4trace:configure.in:249: -1- m4_pattern_allow([^HAVE_CHOWN$]) -m4trace:configure.in:249: -1- AH_OUTPUT([HAVE_CHOWN], [/* Define to 1 if your system has a working `chown\' function. */ +m4trace:configure.in:251: -1- AC_DEFINE_TRACE_LITERAL([HAVE_CHOWN]) +m4trace:configure.in:251: -1- m4_pattern_allow([^HAVE_CHOWN$]) +m4trace:configure.in:251: -1- AH_OUTPUT([HAVE_CHOWN], [/* Define to 1 if your system has a working `chown\' function. */ #undef HAVE_CHOWN]) -m4trace:configure.in:250: -1- AC_DEFINE_TRACE_LITERAL([pid_t]) -m4trace:configure.in:250: -1- m4_pattern_allow([^pid_t$]) -m4trace:configure.in:250: -1- AH_OUTPUT([pid_t], [/* Define to `int\' if does not define. */ +m4trace:configure.in:252: -1- AC_DEFINE_TRACE_LITERAL([pid_t]) +m4trace:configure.in:252: -1- m4_pattern_allow([^pid_t$]) +m4trace:configure.in:252: -1- AH_OUTPUT([pid_t], [/* Define to `int\' if does not define. */ #undef pid_t]) -m4trace:configure.in:250: -1- AH_OUTPUT([HAVE_VFORK_H], [/* Define to 1 if you have the header file. 
*/ +m4trace:configure.in:252: -1- AH_OUTPUT([HAVE_VFORK_H], [/* Define to 1 if you have the header file. */ #undef HAVE_VFORK_H]) -m4trace:configure.in:250: -1- AH_OUTPUT([HAVE_FORK], [/* Define to 1 if you have the `fork\' function. */ +m4trace:configure.in:252: -1- AH_OUTPUT([HAVE_FORK], [/* Define to 1 if you have the `fork\' function. */ #undef HAVE_FORK]) -m4trace:configure.in:250: -1- AH_OUTPUT([HAVE_VFORK], [/* Define to 1 if you have the `vfork\' function. */ +m4trace:configure.in:252: -1- AH_OUTPUT([HAVE_VFORK], [/* Define to 1 if you have the `vfork\' function. */ #undef HAVE_VFORK]) -m4trace:configure.in:250: -1- AC_DEFINE_TRACE_LITERAL([HAVE_WORKING_VFORK]) -m4trace:configure.in:250: -1- m4_pattern_allow([^HAVE_WORKING_VFORK$]) -m4trace:configure.in:250: -1- AH_OUTPUT([HAVE_WORKING_VFORK], [/* Define to 1 if `vfork\' works. */ +m4trace:configure.in:252: -1- AC_DEFINE_TRACE_LITERAL([HAVE_WORKING_VFORK]) +m4trace:configure.in:252: -1- m4_pattern_allow([^HAVE_WORKING_VFORK$]) +m4trace:configure.in:252: -1- AH_OUTPUT([HAVE_WORKING_VFORK], [/* Define to 1 if `vfork\' works. */ #undef HAVE_WORKING_VFORK]) -m4trace:configure.in:250: -1- AC_DEFINE_TRACE_LITERAL([vfork]) -m4trace:configure.in:250: -1- m4_pattern_allow([^vfork$]) -m4trace:configure.in:250: -1- AH_OUTPUT([vfork], [/* Define as `fork\' if `vfork\' does not work. */ +m4trace:configure.in:252: -1- AC_DEFINE_TRACE_LITERAL([vfork]) +m4trace:configure.in:252: -1- m4_pattern_allow([^vfork$]) +m4trace:configure.in:252: -1- AH_OUTPUT([vfork], [/* Define as `fork\' if `vfork\' does not work. */ #undef vfork]) -m4trace:configure.in:250: -1- AC_DEFINE_TRACE_LITERAL([HAVE_WORKING_FORK]) -m4trace:configure.in:250: -1- m4_pattern_allow([^HAVE_WORKING_FORK$]) -m4trace:configure.in:250: -1- AH_OUTPUT([HAVE_WORKING_FORK], [/* Define to 1 if `fork\' works. */ +m4trace:configure.in:252: -1- AC_DEFINE_TRACE_LITERAL([HAVE_WORKING_FORK]) +m4trace:configure.in:252: -1- m4_pattern_allow([^HAVE_WORKING_FORK$]) +m4trace:configure.in:252: -1- AH_OUTPUT([HAVE_WORKING_FORK], [/* Define to 1 if `fork\' works. */ #undef HAVE_WORKING_FORK]) -m4trace:configure.in:251: -1- AH_OUTPUT([HAVE_STDLIB_H], [/* Define to 1 if you have the header file. */ +m4trace:configure.in:253: -1- AH_OUTPUT([HAVE_STDLIB_H], [/* Define to 1 if you have the header file. */ #undef HAVE_STDLIB_H]) -m4trace:configure.in:251: -1- AC_DEFINE_TRACE_LITERAL([HAVE_MALLOC]) -m4trace:configure.in:251: -1- m4_pattern_allow([^HAVE_MALLOC$]) -m4trace:configure.in:251: -1- AH_OUTPUT([HAVE_MALLOC], [/* Define to 1 if your system has a GNU libc compatible `malloc\' function, and +m4trace:configure.in:253: -1- AC_DEFINE_TRACE_LITERAL([HAVE_MALLOC]) +m4trace:configure.in:253: -1- m4_pattern_allow([^HAVE_MALLOC$]) +m4trace:configure.in:253: -1- AH_OUTPUT([HAVE_MALLOC], [/* Define to 1 if your system has a GNU libc compatible `malloc\' function, and to 0 otherwise. 
*/ #undef HAVE_MALLOC]) -m4trace:configure.in:251: -1- AC_DEFINE_TRACE_LITERAL([HAVE_MALLOC]) -m4trace:configure.in:251: -1- m4_pattern_allow([^HAVE_MALLOC$]) -m4trace:configure.in:251: -1- AC_LIBSOURCE([malloc.c]) -m4trace:configure.in:251: -1- AC_SUBST([LIB@&t@OBJS], ["$LIB@&t@OBJS malloc.$ac_objext"]) -m4trace:configure.in:251: -1- AC_SUBST_TRACE([LIB@&t@OBJS]) -m4trace:configure.in:251: -1- m4_pattern_allow([^LIB@&t@OBJS$]) -m4trace:configure.in:251: -1- AC_DEFINE_TRACE_LITERAL([malloc]) -m4trace:configure.in:251: -1- m4_pattern_allow([^malloc$]) -m4trace:configure.in:251: -1- AH_OUTPUT([malloc], [/* Define to rpl_malloc if the replacement function should be used. */ +m4trace:configure.in:253: -1- AC_DEFINE_TRACE_LITERAL([HAVE_MALLOC]) +m4trace:configure.in:253: -1- m4_pattern_allow([^HAVE_MALLOC$]) +m4trace:configure.in:253: -1- AC_LIBSOURCE([malloc.c]) +m4trace:configure.in:253: -1- AC_SUBST([LIB@&t@OBJS], ["$LIB@&t@OBJS malloc.$ac_objext"]) +m4trace:configure.in:253: -1- AC_SUBST_TRACE([LIB@&t@OBJS]) +m4trace:configure.in:253: -1- m4_pattern_allow([^LIB@&t@OBJS$]) +m4trace:configure.in:253: -1- AC_DEFINE_TRACE_LITERAL([malloc]) +m4trace:configure.in:253: -1- m4_pattern_allow([^malloc$]) +m4trace:configure.in:253: -1- AH_OUTPUT([malloc], [/* Define to rpl_malloc if the replacement function should be used. */ #undef malloc]) -m4trace:configure.in:252: -1- AC_LIBSOURCE([memcmp.c]) -m4trace:configure.in:252: -1- AC_SUBST([LIB@&t@OBJS], ["$LIB@&t@OBJS memcmp.$ac_objext"]) -m4trace:configure.in:252: -1- AC_SUBST_TRACE([LIB@&t@OBJS]) -m4trace:configure.in:252: -1- m4_pattern_allow([^LIB@&t@OBJS$]) -m4trace:configure.in:253: -1- AH_OUTPUT([HAVE_STDLIB_H], [/* Define to 1 if you have the header file. */ +m4trace:configure.in:254: -1- AC_LIBSOURCE([memcmp.c]) +m4trace:configure.in:254: -1- AC_SUBST([LIB@&t@OBJS], ["$LIB@&t@OBJS memcmp.$ac_objext"]) +m4trace:configure.in:254: -1- AC_SUBST_TRACE([LIB@&t@OBJS]) +m4trace:configure.in:254: -1- m4_pattern_allow([^LIB@&t@OBJS$]) +m4trace:configure.in:255: -1- AH_OUTPUT([HAVE_STDLIB_H], [/* Define to 1 if you have the header file. */ #undef HAVE_STDLIB_H]) -m4trace:configure.in:253: -1- AH_OUTPUT([HAVE_UNISTD_H], [/* Define to 1 if you have the header file. */ +m4trace:configure.in:255: -1- AH_OUTPUT([HAVE_UNISTD_H], [/* Define to 1 if you have the header file. */ #undef HAVE_UNISTD_H]) -m4trace:configure.in:253: -1- AH_OUTPUT([HAVE_GETPAGESIZE], [/* Define to 1 if you have the `getpagesize\' function. */ +m4trace:configure.in:255: -1- AH_OUTPUT([HAVE_GETPAGESIZE], [/* Define to 1 if you have the `getpagesize\' function. */ #undef HAVE_GETPAGESIZE]) -m4trace:configure.in:253: -1- AC_DEFINE_TRACE_LITERAL([HAVE_MMAP]) -m4trace:configure.in:253: -1- m4_pattern_allow([^HAVE_MMAP$]) -m4trace:configure.in:253: -1- AH_OUTPUT([HAVE_MMAP], [/* Define to 1 if you have a working `mmap\' system call. */ +m4trace:configure.in:255: -1- AC_DEFINE_TRACE_LITERAL([HAVE_MMAP]) +m4trace:configure.in:255: -1- m4_pattern_allow([^HAVE_MMAP$]) +m4trace:configure.in:255: -1- AH_OUTPUT([HAVE_MMAP], [/* Define to 1 if you have a working `mmap\' system call. */ #undef HAVE_MMAP]) -m4trace:configure.in:254: -1- AH_OUTPUT([HAVE_STDLIB_H], [/* Define to 1 if you have the header file. */ +m4trace:configure.in:256: -1- AH_OUTPUT([HAVE_STDLIB_H], [/* Define to 1 if you have the header file. 
*/ #undef HAVE_STDLIB_H]) -m4trace:configure.in:254: -1- AC_DEFINE_TRACE_LITERAL([HAVE_REALLOC]) -m4trace:configure.in:254: -1- m4_pattern_allow([^HAVE_REALLOC$]) -m4trace:configure.in:254: -1- AH_OUTPUT([HAVE_REALLOC], [/* Define to 1 if your system has a GNU libc compatible `realloc\' function, +m4trace:configure.in:256: -1- AC_DEFINE_TRACE_LITERAL([HAVE_REALLOC]) +m4trace:configure.in:256: -1- m4_pattern_allow([^HAVE_REALLOC$]) +m4trace:configure.in:256: -1- AH_OUTPUT([HAVE_REALLOC], [/* Define to 1 if your system has a GNU libc compatible `realloc\' function, and to 0 otherwise. */ #undef HAVE_REALLOC]) -m4trace:configure.in:254: -1- AC_DEFINE_TRACE_LITERAL([HAVE_REALLOC]) -m4trace:configure.in:254: -1- m4_pattern_allow([^HAVE_REALLOC$]) -m4trace:configure.in:254: -1- AC_LIBSOURCE([realloc.c]) -m4trace:configure.in:254: -1- AC_SUBST([LIB@&t@OBJS], ["$LIB@&t@OBJS realloc.$ac_objext"]) -m4trace:configure.in:254: -1- AC_SUBST_TRACE([LIB@&t@OBJS]) -m4trace:configure.in:254: -1- m4_pattern_allow([^LIB@&t@OBJS$]) -m4trace:configure.in:254: -1- AC_DEFINE_TRACE_LITERAL([realloc]) -m4trace:configure.in:254: -1- m4_pattern_allow([^realloc$]) -m4trace:configure.in:254: -1- AH_OUTPUT([realloc], [/* Define to rpl_realloc if the replacement function should be used. */ +m4trace:configure.in:256: -1- AC_DEFINE_TRACE_LITERAL([HAVE_REALLOC]) +m4trace:configure.in:256: -1- m4_pattern_allow([^HAVE_REALLOC$]) +m4trace:configure.in:256: -1- AC_LIBSOURCE([realloc.c]) +m4trace:configure.in:256: -1- AC_SUBST([LIB@&t@OBJS], ["$LIB@&t@OBJS realloc.$ac_objext"]) +m4trace:configure.in:256: -1- AC_SUBST_TRACE([LIB@&t@OBJS]) +m4trace:configure.in:256: -1- m4_pattern_allow([^LIB@&t@OBJS$]) +m4trace:configure.in:256: -1- AC_DEFINE_TRACE_LITERAL([realloc]) +m4trace:configure.in:256: -1- m4_pattern_allow([^realloc$]) +m4trace:configure.in:256: -1- AH_OUTPUT([realloc], [/* Define to rpl_realloc if the replacement function should be used. */ #undef realloc]) -m4trace:configure.in:255: -1- AC_DEFINE_TRACE_LITERAL([RETSIGTYPE]) -m4trace:configure.in:255: -1- m4_pattern_allow([^RETSIGTYPE$]) -m4trace:configure.in:255: -1- AH_OUTPUT([RETSIGTYPE], [/* Define as the return type of signal handlers (`int\' or `void\'). */ +m4trace:configure.in:257: -1- AC_DEFINE_TRACE_LITERAL([RETSIGTYPE]) +m4trace:configure.in:257: -1- m4_pattern_allow([^RETSIGTYPE$]) +m4trace:configure.in:257: -1- AH_OUTPUT([RETSIGTYPE], [/* Define as the return type of signal handlers (`int\' or `void\'). */ #undef RETSIGTYPE]) -m4trace:configure.in:256: -1- AH_OUTPUT([HAVE_VPRINTF], [/* Define to 1 if you have the `vprintf\' function. */ +m4trace:configure.in:258: -1- AH_OUTPUT([HAVE_VPRINTF], [/* Define to 1 if you have the `vprintf\' function. */ #undef HAVE_VPRINTF]) -m4trace:configure.in:256: -1- AC_DEFINE_TRACE_LITERAL([HAVE_DOPRNT]) -m4trace:configure.in:256: -1- m4_pattern_allow([^HAVE_DOPRNT$]) -m4trace:configure.in:256: -1- AH_OUTPUT([HAVE_DOPRNT], [/* Define to 1 if you don\'t have `vprintf\' but do have `_doprnt.\' */ +m4trace:configure.in:258: -1- AC_DEFINE_TRACE_LITERAL([HAVE_DOPRNT]) +m4trace:configure.in:258: -1- m4_pattern_allow([^HAVE_DOPRNT$]) +m4trace:configure.in:258: -1- AH_OUTPUT([HAVE_DOPRNT], [/* Define to 1 if you don\'t have `vprintf\' but do have `_doprnt.\' */ #undef HAVE_DOPRNT]) -m4trace:configure.in:257: -1- AH_OUTPUT([HAVE_FCHDIR], [/* Define to 1 if you have the `fchdir\' function. */ +m4trace:configure.in:259: -1- AH_OUTPUT([HAVE_FCHDIR], [/* Define to 1 if you have the `fchdir\' function. 
*/ #undef HAVE_FCHDIR]) -m4trace:configure.in:257: -1- AH_OUTPUT([HAVE_GETCWD], [/* Define to 1 if you have the `getcwd\' function. */ +m4trace:configure.in:259: -1- AH_OUTPUT([HAVE_GETCWD], [/* Define to 1 if you have the `getcwd\' function. */ #undef HAVE_GETCWD]) -m4trace:configure.in:257: -1- AH_OUTPUT([HAVE_GETTIMEOFDAY], [/* Define to 1 if you have the `gettimeofday\' function. */ +m4trace:configure.in:259: -1- AH_OUTPUT([HAVE_GETTIMEOFDAY], [/* Define to 1 if you have the `gettimeofday\' function. */ #undef HAVE_GETTIMEOFDAY]) -m4trace:configure.in:257: -1- AH_OUTPUT([HAVE_MEMMOVE], [/* Define to 1 if you have the `memmove\' function. */ +m4trace:configure.in:259: -1- AH_OUTPUT([HAVE_MEMMOVE], [/* Define to 1 if you have the `memmove\' function. */ #undef HAVE_MEMMOVE]) -m4trace:configure.in:257: -1- AH_OUTPUT([HAVE_MEMSET], [/* Define to 1 if you have the `memset\' function. */ +m4trace:configure.in:259: -1- AH_OUTPUT([HAVE_MEMSET], [/* Define to 1 if you have the `memset\' function. */ #undef HAVE_MEMSET]) -m4trace:configure.in:257: -1- AH_OUTPUT([HAVE_MKDIR], [/* Define to 1 if you have the `mkdir\' function. */ +m4trace:configure.in:259: -1- AH_OUTPUT([HAVE_MKDIR], [/* Define to 1 if you have the `mkdir\' function. */ #undef HAVE_MKDIR]) -m4trace:configure.in:257: -1- AH_OUTPUT([HAVE_MUNMAP], [/* Define to 1 if you have the `munmap\' function. */ +m4trace:configure.in:259: -1- AH_OUTPUT([HAVE_MUNMAP], [/* Define to 1 if you have the `munmap\' function. */ #undef HAVE_MUNMAP]) -m4trace:configure.in:257: -1- AH_OUTPUT([HAVE_RMDIR], [/* Define to 1 if you have the `rmdir\' function. */ +m4trace:configure.in:259: -1- AH_OUTPUT([HAVE_RMDIR], [/* Define to 1 if you have the `rmdir\' function. */ #undef HAVE_RMDIR]) -m4trace:configure.in:257: -1- AH_OUTPUT([HAVE_STRCHR], [/* Define to 1 if you have the `strchr\' function. */ +m4trace:configure.in:259: -1- AH_OUTPUT([HAVE_STRCHR], [/* Define to 1 if you have the `strchr\' function. */ #undef HAVE_STRCHR]) -m4trace:configure.in:257: -1- AH_OUTPUT([HAVE_STRDUP], [/* Define to 1 if you have the `strdup\' function. */ +m4trace:configure.in:259: -1- AH_OUTPUT([HAVE_STRDUP], [/* Define to 1 if you have the `strdup\' function. */ #undef HAVE_STRDUP]) -m4trace:configure.in:257: -1- AH_OUTPUT([HAVE_STRERROR], [/* Define to 1 if you have the `strerror\' function. */ +m4trace:configure.in:259: -1- AH_OUTPUT([HAVE_STRERROR], [/* Define to 1 if you have the `strerror\' function. */ #undef HAVE_STRERROR]) -m4trace:configure.in:257: -1- AH_OUTPUT([HAVE_STRRCHR], [/* Define to 1 if you have the `strrchr\' function. */ +m4trace:configure.in:259: -1- AH_OUTPUT([HAVE_STRRCHR], [/* Define to 1 if you have the `strrchr\' function. */ #undef HAVE_STRRCHR]) -m4trace:configure.in:257: -1- AH_OUTPUT([HAVE_STRTOUL], [/* Define to 1 if you have the `strtoul\' function. */ +m4trace:configure.in:259: -1- AH_OUTPUT([HAVE_STRTOUL], [/* Define to 1 if you have the `strtoul\' function. */ #undef HAVE_STRTOUL]) -m4trace:configure.in:257: -1- AH_OUTPUT([HAVE_STRTOULL], [/* Define to 1 if you have the `strtoull\' function. */ +m4trace:configure.in:259: -1- AH_OUTPUT([HAVE_STRTOULL], [/* Define to 1 if you have the `strtoull\' function. */ #undef HAVE_STRTOULL]) -m4trace:configure.in:257: -1- AH_OUTPUT([HAVE_ALPHASORT], [/* Define to 1 if you have the `alphasort\' function. */ +m4trace:configure.in:259: -1- AH_OUTPUT([HAVE_ALPHASORT], [/* Define to 1 if you have the `alphasort\' function. 
*/ #undef HAVE_ALPHASORT]) -m4trace:configure.in:257: -1- AH_OUTPUT([HAVE_DIRFD], [/* Define to 1 if you have the `dirfd\' function. */ +m4trace:configure.in:259: -1- AH_OUTPUT([HAVE_DIRFD], [/* Define to 1 if you have the `dirfd\' function. */ #undef HAVE_DIRFD]) -m4trace:configure.in:262: -1- AC_CONFIG_FILES([src/Makefile tests/Makefile]) -m4trace:configure.in:263: -1- AC_SUBST([LIB@&t@OBJS], [$ac_libobjs]) -m4trace:configure.in:263: -1- AC_SUBST_TRACE([LIB@&t@OBJS]) -m4trace:configure.in:263: -1- m4_pattern_allow([^LIB@&t@OBJS$]) -m4trace:configure.in:263: -1- AC_SUBST([LTLIBOBJS], [$ac_ltlibobjs]) -m4trace:configure.in:263: -1- AC_SUBST_TRACE([LTLIBOBJS]) -m4trace:configure.in:263: -1- m4_pattern_allow([^LTLIBOBJS$]) -m4trace:configure.in:263: -1- AC_SUBST_TRACE([top_builddir]) -m4trace:configure.in:263: -1- AC_SUBST_TRACE([srcdir]) -m4trace:configure.in:263: -1- AC_SUBST_TRACE([abs_srcdir]) -m4trace:configure.in:263: -1- AC_SUBST_TRACE([top_srcdir]) -m4trace:configure.in:263: -1- AC_SUBST_TRACE([abs_top_srcdir]) -m4trace:configure.in:263: -1- AC_SUBST_TRACE([builddir]) -m4trace:configure.in:263: -1- AC_SUBST_TRACE([abs_builddir]) -m4trace:configure.in:263: -1- AC_SUBST_TRACE([abs_top_builddir]) +m4trace:configure.in:264: -1- AC_CONFIG_FILES([src/Makefile tests/Makefile]) +m4trace:configure.in:265: -1- AC_SUBST([LIB@&t@OBJS], [$ac_libobjs]) +m4trace:configure.in:265: -1- AC_SUBST_TRACE([LIB@&t@OBJS]) +m4trace:configure.in:265: -1- m4_pattern_allow([^LIB@&t@OBJS$]) +m4trace:configure.in:265: -1- AC_SUBST([LTLIBOBJS], [$ac_ltlibobjs]) +m4trace:configure.in:265: -1- AC_SUBST_TRACE([LTLIBOBJS]) +m4trace:configure.in:265: -1- m4_pattern_allow([^LTLIBOBJS$]) +m4trace:configure.in:265: -1- AC_SUBST_TRACE([top_builddir]) +m4trace:configure.in:265: -1- AC_SUBST_TRACE([srcdir]) +m4trace:configure.in:265: -1- AC_SUBST_TRACE([abs_srcdir]) +m4trace:configure.in:265: -1- AC_SUBST_TRACE([top_srcdir]) +m4trace:configure.in:265: -1- AC_SUBST_TRACE([abs_top_srcdir]) +m4trace:configure.in:265: -1- AC_SUBST_TRACE([builddir]) +m4trace:configure.in:265: -1- AC_SUBST_TRACE([abs_builddir]) +m4trace:configure.in:265: -1- AC_SUBST_TRACE([abs_top_builddir]) diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/CHANGES /tmp/FWQOjxAJQ0/fsvs-1.1.17/CHANGES --- fsvs-1.1.14/CHANGES 2008-04-02 06:25:13.000000000 +0100 +++ fsvs-1.1.17/CHANGES 2008-10-29 07:19:09.000000000 +0000 @@ -1,3 +1,117 @@ +Changes since 1.1.16 +- New "uncopy" command, to disambiguate "revert". Manually added or + "prop-set" entries are kept known. +- Small cleanups and documentation updates. +- New option "config_dir", important for client certificate authentication. +- Performance fix for "fsvs diff -rX:Y entry" - don't diff the whole + working copy, only the given entries. +- "fsvs info" for the working copy root now prints the revision of the + highest priority URL, and not "0". +- Bugfix for "dir_sort" option; the root directory wasn't printed. +- New command "delay", for use in scripts. +- The option "dir_sort" now uses strcoll(), ie. sorts according to the + current locale. +- A new flag for ignore patterns, for matching directories only. +- "diff" showed for replaced entries "only in rX" - fixed. +- Changed the recursive behaviour to do the whole hierarchy before the + parent directory - needed for revert. +- Configure and install fixes, patch by Maurice. Thank you. +- Fixed duplicate "fsvs info deleted-directory" output. +- New option "all_removed", to trim the output for deleted hierarchies. 
+- Fixed a small memory usage problem, not directly a leak, but noticeable + with "sync-repos"; and a workaround for svn_ra_local__get_file() eating + memory. Thank you, Plamen. +- User-readable error on non-writeable $FSVS_CONF. Thank you, Thomas. +- New command "rel-ignore" for converting input to relative ignore + patterns. +- Try to get the directory mtimes correct again; that's a bit ugly, because + of arbitrary changes. +- Bugfix: for http URLs the temporary directory was filled with files. +- Export some environment variables for use in diff, commit- and + update-pipes. +- The filter option now allows "mode"; and it should work correctly + for commit, too. +- Splitted "-C" into finer grained option settings "-o change_check". + Changed the default to use MD5 on possibly-changed files. +- Started a "tips & tricks" document. +- Fixed the directory mtime for meta-data-less ("svn import"ed), empty + directories. +- Bugfix: new parent directories sent wrong meta-data to the repository on + a deep commit (ie. the parent directory wasn't directly mentioned). +- Bugfix for filtered commit (eg. "-f text"). +- Another try to detect invalid "colordiff" program names, and a better + message than a simple "EPIPE". +- Lots of internal data structure changes, and how the entries are handled. +- New "fsvs cat" command, to fetch really pristine copies from the + repository. +- Some documentation fixes, and repairs for the man pages. +- FSVS now shows "maybe changed" for unreadable files or directories; + should we throw a warning? +- Ignore patterns can now match the entries' mode. + +Changes since 1.1.15 +- FSVS_WARNINGS removed. Use FSVS_WARNING. +- Small fixes and cleanups. +- Handling of FSVS_WAA and FSVS_CONF now via the normal option handling, to + reduce code size. Now it's possible to use "-oconf=..." on the command + line, too. + (But it's not possible to override the paths from the config file.) +- Fixed EPIPE handling in many functions, most notably on "fsvs st" output. +- The colordiff option has changed; it can now be used to specify the + binary to use instead of the default "colordiff". +- "fsvs diff" would leave a temporary file behind if the colordiff binary + quits unexpectedly, or a EPIPE was caught. +- Bugfix for error after commit, when $EDITOR returned an 0 byte file as + commit message. +- "fsvs diff" changed to recursive behaviour, as "svn" does. +- Fixed "fsvs diff -rX" to print only changed entries, not the whole list. +- "fsvs diff -rX:Y" reimplemented, too; performance could get optimized. +- Changed the memory allocation pattern to (hopefully) allocate in larger + blocks. +- Removed entries are not kept in some mixed mode internally, but + temporarily copied and remembered as old - cleans up some code, and + lowers memory usage by about 20 byte per entry on 32bit; 32 on 64bit. +- Fixed some issues in the example/setup.sh script. +- Moved a lot of structure members, to avoid padding (esp. on 64bit). +- Some bigger cleanups (FS_CHILD_CHANGED) +- Fix for waa__mkdir() not seeing that the existing inode isn't a + directory. +- Bugfix for special entries; they'd be created as $destination.XXXXXX, + without checking whether such a file already exists. +- Bugfix for revert - if an entry gets reverted, we implicitly change the + parents mtime. That shouldn't be printed as a change, though. +- Removed properties wouldn't get committed or updated properly. +- Committed the distclean patch from Sheldon Hearn. +- "make install" implemented. +- Tempfile generation consolidated. 
+- Rewrote entry fetching from the repository. Previously a file with bad + mode (like 0111) couldn't get diffed. +- "update -rX" and "diff -rX" (but not "diff -rX:Y") now use the per-url + override syntax (see "-u"). +- Multi-URL code has been found to be not perfect - removing a (common) + directory removes files from all URLs. +- New option to set maximum number of revisions on "fsvs log". +- Fixed a memory leak for symlinks. +- Fixed diff for symlinks and devices. +- "fsvs sync" now tries to fetch all filesizes and entry types from the + repository, too. For that we have to do a recursive listing, and fetch + special entries; encoded files with only a few kB are looked at, too. + Needs more time and bandwidth. +- "update -rX" and "diff -rX" (but not "diff -rX:Y") now use the per-url + override syntax (see "-u"). +- Multi-URL code has been found to be not perfect - removing a (common) + directory removes files from all URLs. +- New option to set maximum number of revisions on "fsvs log". +- Fixed a memory leak for symlinks. +- Fixed diff for symlinks and devices. +- New option "stop_change" for use in scripts. +- New option "author" for commit. +- Changed from "merge" to "diff3" as merge program; seems to be more common. + +Changes since 1.1.14 +- Fixed a bug in the shadow-clean.pl script; it wouldn't remove the + passwords. + Changes since 1.1.13 - New option for conflict handling; allows "stop" (historical default), "local", "remote", "both" or "merge". @@ -15,6 +129,7 @@ wrong options. - Bugfix for revert - would save the dir-list, even when an error occurs. - update can now be restricted to an arbitrary subset of URLs. +- Fix for "revert -rX" for changed file types. - Tried to get compatibility with systems that don't expose MAJOR, MINOR and MKDEV (eg. Solaris). diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/configure /tmp/FWQOjxAJQ0/fsvs-1.1.17/configure --- fsvs-1.1.14/configure 2008-04-02 10:59:38.000000000 +0100 +++ fsvs-1.1.17/configure 2008-10-29 09:03:23.000000000 +0000 @@ -3137,6 +3137,83 @@ fi +{ echo "$as_me:$LINENO: checking for svn_txdelta_apply in -lsvn_delta-1" >&5 +echo $ECHO_N "checking for svn_txdelta_apply in -lsvn_delta-1... $ECHO_C" >&6; } +if test "${ac_cv_lib_svn_delta_1_svn_txdelta_apply+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lsvn_delta-1 $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char svn_txdelta_apply (); +int +main () +{ +return svn_txdelta_apply (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + ac_cv_lib_svn_delta_1_svn_txdelta_apply=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_lib_svn_delta_1_svn_txdelta_apply=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ echo "$as_me:$LINENO: result: $ac_cv_lib_svn_delta_1_svn_txdelta_apply" >&5 +echo "${ECHO_T}$ac_cv_lib_svn_delta_1_svn_txdelta_apply" >&6; } +if test $ac_cv_lib_svn_delta_1_svn_txdelta_apply = yes; then + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBSVN_DELTA_1 1 +_ACEOF + + LIBS="-lsvn_delta-1 $LIBS" + +else + { { echo "$as_me:$LINENO: error: Sorry, can't find subversion. +See \`config.log' for more details." >&5 +echo "$as_me: error: Sorry, can't find subversion. +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } +fi + + { echo "$as_me:$LINENO: checking for svn_ra_initialize in -lsvn_ra-1" >&5 echo $ECHO_N "checking for svn_ra_initialize in -lsvn_ra-1... $ECHO_C" >&6; } if test "${ac_cv_lib_svn_ra_1_svn_ra_initialize+set}" = set; then diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/configure.in /tmp/FWQOjxAJQ0/fsvs-1.1.17/configure.in --- fsvs-1.1.14/configure.in 2008-04-02 06:25:13.000000000 +0100 +++ fsvs-1.1.17/configure.in 2008-08-26 16:08:05.000000000 +0100 @@ -100,6 +100,8 @@ [AC_MSG_FAILURE([Sorry, can't find PCRE.])]) AC_CHECK_LIB([aprutil-1], [apr_md5_init], [], [AC_MSG_FAILURE([Sorry, can't find APR.])]) +AC_CHECK_LIB([svn_delta-1], [svn_txdelta_apply], [], + [AC_MSG_FAILURE([Sorry, can't find subversion.])]) AC_CHECK_LIB([svn_ra-1], [svn_ra_initialize], [], [AC_MSG_FAILURE([Sorry, can't find subversion.])]) AC_CHECK_LIB([gdbm], [gdbm_firstkey], [], diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/debian/changelog /tmp/FWQOjxAJQ0/fsvs-1.1.17/debian/changelog --- fsvs-1.1.14/debian/changelog 2008-12-15 09:12:09.000000000 +0000 +++ fsvs-1.1.17/debian/changelog 2008-12-15 09:12:09.000000000 +0000 @@ -1,3 +1,25 @@ +fsvs (1.1.17-1~ppa2) intrepid; urgency=low + + * Intrepid version + + -- David Fraser Mon, 29 Dec 2008 10:46:00 +0200 + +fsvs (1.1.17-1~ppa1) hardy; urgency=low + + * Updated to latest version from upstream + + -- David Fraser Wed, 29 Oct 2008 12:30:00 +0200 + +fsvs (1.1.16-1) unstable; urgency=low + + * New upstream release + * Makefile now provides own distclean. + * With no patches remaining, remove dpatch Build-Depends. + * Add Homepage and upgrade Standards-Version. + * Update description as per request from upstream. 
+ + -- Sheldon Hearn Sat, 12 Jul 2008 15:31:13 +0200 + fsvs (1.1.14-1) unstable; urgency=low * New upstream release diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/debian/control /tmp/FWQOjxAJQ0/fsvs-1.1.17/debian/control --- fsvs-1.1.14/debian/control 2008-12-15 09:12:09.000000000 +0000 +++ fsvs-1.1.17/debian/control 2008-12-15 09:12:09.000000000 +0000 @@ -2,15 +2,16 @@ Section: admin Priority: optional Maintainer: Sheldon Hearn -Build-Depends: debhelper (>= 5), dpatch, autotools-dev, libsvn-dev, libgdbm-dev, ctags, libaprutil1-dev, libpcre3-dev -Standards-Version: 3.7.3 +Build-Depends: debhelper (>= 5), autotools-dev, libsvn-dev, libgdbm-dev, ctags, libaprutil1-dev, libpcre3-dev +Standards-Version: 3.8.0 +Homepage: http://fsvs.tigris.org/ Package: fsvs Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends} Recommends: subversion -Description: full directory tree or filesystem versioning with metadata support - FSVS stands for "Fast System VerSioning", "File System VerSioning" or - "Full System VerSioning". It it a complete backup/restore tool for all files - in a directory tree or for whole filesystems, with a subversion repository as - the backend. +Description: Full system versioning with metadata support + FSVS is a backup/restore/versioning/deployment tool for whole directory + trees or filesystems, with a subversion repository as the backend. + It can do overlays of multiple repositories, to achieve some content + separation (base install, local modifications, etc.) diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/debian/patches/00list /tmp/FWQOjxAJQ0/fsvs-1.1.17/debian/patches/00list --- fsvs-1.1.14/debian/patches/00list 2008-12-15 09:12:09.000000000 +0000 +++ fsvs-1.1.17/debian/patches/00list 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -makefile_distclean diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/debian/patches/makefile_distclean.dpatch /tmp/FWQOjxAJQ0/fsvs-1.1.17/debian/patches/makefile_distclean.dpatch --- fsvs-1.1.14/debian/patches/makefile_distclean.dpatch 2008-12-15 09:12:09.000000000 +0000 +++ fsvs-1.1.17/debian/patches/makefile_distclean.dpatch 1970-01-01 01:00:00.000000000 +0100 @@ -1,19 +0,0 @@ -#! /bin/sh /usr/share/dpatch/dpatch-run -## makefile_distclean.dpatch by Sheldon Hearn -## -## All lines beginning with `## DP:' are a description of the patch. -## DP: Add distclean target to top-level Makefile - -@DPATCH@ - ---- fsvs-1.1.12.orig/Makefile -+++ fsvs-1.1.12/Makefile -@@ -16,3 +16,8 @@ - configure: configure.in - @echo Generating configure. 
- autoconf -+ -+distclean: -+ rm -f config.cache config.log config.status 2> /dev/null || true -+ rm -f src/Makefile src/tags tests/Makefile 2> /dev/null || true -+ rm -f src/config.h src/*.[os] src/.*.d src/fsvs 2> /dev/null || true diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/debian/rules /tmp/FWQOjxAJQ0/fsvs-1.1.17/debian/rules --- fsvs-1.1.14/debian/rules 2008-12-15 09:12:09.000000000 +0000 +++ fsvs-1.1.17/debian/rules 2008-12-15 09:12:09.000000000 +0000 @@ -15,8 +15,6 @@ DEB_HOST_GNU_TYPE ?= $(shell dpkg-architecture -qDEB_HOST_GNU_TYPE) DEB_BUILD_GNU_TYPE ?= $(shell dpkg-architecture -qDEB_BUILD_GNU_TYPE) -include /usr/share/dpatch/dpatch.make - CFLAGS = -Wall -g ifneq (,$(findstring noopt,$(DEB_BUILD_OPTIONS))) @@ -32,17 +30,14 @@ build: build-stamp -build-stamp: patch-stamp config.status +build-stamp: config.status dh_testdir $(MAKE) -C src touch $@ -# distclean is only added to top-level Makefile by dpatch -clean: patch-stamp clean-patched unpatch - -clean-patched: +clean: dh_testdir dh_testroot rm -f build-stamp diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/doc/fsvs.1 /tmp/FWQOjxAJQ0/fsvs-1.1.17/doc/fsvs.1 --- fsvs-1.1.14/doc/fsvs.1 2008-04-02 06:24:33.000000000 +0100 +++ fsvs-1.1.17/doc/fsvs.1 2008-10-29 08:19:27.000000000 +0000 @@ -1,8 +1,8 @@ -.TH "Commands and command line parameters" 1 "1 Apr 2008" "Version trunk:2078" "fsvs" \" -*- nroff -*- +.TH "FSVS - fast versioning tool" 1 "29 Oct 2008" "Version trunk:2782" "fsvs" \" -*- nroff -*- .ad l .nh .SH NAME -fsvs - fast versioning tool +FSVS - fast versioning tool .SH "SYNOPSIS" .PP \fCfsvs command [options] [args]\fP @@ -25,14 +25,14 @@ .PP .SH "Defining which entries to take:" .PP -.IP "\fB\fBignore\fP\fP" 1c +.IP "\fB\fBignore\fP and \fBrign\fP\fP" 1c \fCDefine ignore patterns\fP .IP "\fB\fBunversion\fP\fP" 1c \fCRemove entries from versioning\fP .IP "\fB\fBadd\fP\fP" 1c \fCAdd entries that would be ignored\fP .IP "\fB\fBcp\fP, \fBmv\fP\fP" 1c -\fCTell \fCfsvs\fP that entries were copied.\fP +\fCTell \fCfsvs\fP that entries were copied\fP .PP .SH "Commands working with the repository:" .PP @@ -42,13 +42,17 @@ \fCGet updates from the repository\fP .IP "\fB\fBcheckout\fP\fP" 1c \fCFetch some part of the repository, and register it as working copy\fP -.IP "\fB\fBrevert\fP\fP" 1c -\fCUndo local changes\fP -.IP "\fB\fBremote-status\fP\fP" 1c -\fCAsk what an \fBupdate\fP would bring\fP +.IP "\fB\fBcat\fP\fP" 1c +\fCGet a file from the directory \fP +.IP "\fB\fB\fCrevert\fP\fP and \fB\fCuncp\fP\fP\fP" 1c +\fC\fCUndo local changes and entry markings\fP \fP +.IP "\fB\fB\fCremote-status\fP\fP\fP" 1c +\fC\fCAsk what an \fBupdate\fP would bring\fP \fP .PP -.SH "Property handling" .PP +.SH "Property handling:" +.PP +\fC .IP "\fB\fBprop-set\fP\fP" 1c \fCSet user-defined properties\fP .IP "\fB\fBprop-get\fP\fP" 1c @@ -56,36 +60,47 @@ .IP "\fB\fBprop-list\fP\fP" 1c \fCGet a list of user-defined properties\fP .PP +\fP +.PP .SH "Additional commands used for recovery and debugging:" .PP +\fC .IP "\fB\fBexport\fP\fP" 1c \fCFetch some part of the repository\fP .IP "\fB\fBsync-repos\fP\fP" 1c \fCDrop local information about the entries, and fetch the current list from the repository.\fP .PP +\fP +.PP +\fC .PP \fBNote:\fP .RS 4 Multi-url-operations are relatively new; there might be rough edges. .RE .PP +The \fBreturn code\fP is \fC0\fP for success, or \fC2\fP for an error. 
\fC1\fP is returned if the option \fBStopping status reports as soon as changes are found\fP is used, and changes are found; see also \fBFiltering entries\fP.\fP +.PP .SH "Universal options" .PP .SS "-V -- show version" -\fC-V\fP makes \fCfsvs\fP print the version and a copyright notice, and exit. +\fC \fC-V\fP makes \fCfsvs\fP print the version and a copyright notice, and exit.\fP +.PP .SS "-d and -D -- debugging" -If \fCfsvs\fP was compiled using \fC--enable-debug\fP you can enable printing of debug messages (to \fCSTDOUT\fP) with \fC-d\fP. Per default all messages are printed; if you're only interested in a subset, you can use \fC-D\fP \fIstart-of-function-name\fP. +\fC If \fCfsvs\fP was compiled using \fC--enable-debug\fP you can enable printing of debug messages (to \fCSTDOUT\fP) with \fC-d\fP. Per default all messages are printed; if you're only interested in a subset, you can use \fC-D\fP \fIstart-of-function-name\fP. .PP .nf fsvs -d -D waa_ status .fi .PP - would call the \fIstatus\fP action, printing all debug messages of all WAA functions - \fCwaa__init\fP, \fCwaa__open\fP, etc. + would call the \fIstatus\fP action, printing all debug messages of all WAA functions - \fCwaa__init\fP, \fCwaa__open\fP, etc.\fP .PP -Furthermore you can specify the debug output destination with the option \fCdebug_output\fP. This can be a simple filename (which gets truncated), or, if it starts with a \fC\fP|, a command that the output gets piped into. +\fC Furthermore you can specify the debug output destination with the option \fCdebug_output\fP. This can be a simple filename (which gets truncated), or, if it starts with a \fC\fP|, a command that the output gets piped into.\fP .PP -If the destination cannot be opened (or none is given), debug output goes to \fCSTDOUT\fP. +\fC If the destination cannot be opened (or none is given), debug output goes to \fCSTDOUT\fP.\fP +.PP +\fC .PP \fBNote:\fP .RS 4 @@ -99,45 +114,43 @@ .fi .PP +\fP +.PP .SS "-N, -R -- recursion" -The \fC-N\fP and \fC-R\fP switches in effect just decrement/increment a counter; the behavious is chosen depending on that. So \fC-N -N -N -R -R\fP is equivalent to \fC-N\fP. +\fC The \fC-N\fP and \fC-R\fP switches in effect just decrement/increment a counter; the behavious is chosen depending on that. So \fC-N -N -N -R -R\fP is equivalent to \fC-N\fP.\fP +.PP .SS "-q, -v -- verbose/quiet" -Like the options for recursive behaviour (\fC-R\fP and \fC-N\fP) \fC-v\fP and \fC-q\fP just inc/decrement a counter. The higher the value, the more verbose. +\fC Like the options for recursive behaviour (\fC-R\fP and \fC-N\fP) \fC-v\fP and \fC-q\fP just inc/decrement a counter. The higher the value, the more verbose. .br - Currently only the values \fC-1\fP (quiet), \fC0\fP (normal), and \fC+1\fP (verbose) are used. -.SS "-C -- checksum" -\fC-C\fP increments the checksum flag. Normally \fIstatus\fP tells that a file has \fBpossible\fP modification, if its mtime has changed but its size not. Using \fC-C\fP you can tell the commands to be extra careful and \fBalways\fP check for modifications. + Currently only the values \fC-1\fP (quiet), \fC0\fP (normal), and \fC+1\fP (verbose) are used.\fP .PP -The values are 0 Normal operations 1 Check files for modifications if possibly changed 2 Do an MD5 verification for all files, and check all directories for new entries. -.PP -If a files size has changed, we can be sure that it's changed; a directory is checked for changes if any of its meta-data has changed (mtime, ctime, owner, group, size, mode). 
-.PP -\fBNote:\fP -.RS 4 -\fIcommit\fP and \fIupdate\fP set the checksum flag to \fBat least\fP 1, to avoid missing changed files. -.RE +.SS "-C -- checksum" +\fC \fC-C\fP chooses to use more change detection checks; please see \fBthe change_check option\fP for more details.\fP .PP .SS "-f -- filter entries" -This parameter allows to do a bit of filtering of entries, or, for some operations, modification of the work done on given entries. +\fC This parameter allows to do a bit of filtering of entries, or, for some operations, modification of the work done on given entries.\fP +.PP +\fC It requires a specification at the end, which can be any combination of \fCany\fP, \fCtext\fP, \fCnew\fP, \fCdeleted\fP (or \fCremoved\fP), \fCmeta\fP, \fCmtime\fP, \fCgroup\fP, \fCmode\fP, \fCchanged\fP or \fCowner\fP.\fP .PP -It requires a specification at the end, which can be any combination of \fCany\fP, \fCtext\fP, \fCnew\fP, \fCdeleted\fP, \fCmeta\fP, \fCmtime\fP, \fCgroup\fP or \fCowner\fP. +\fC By giving eg. the value \fCtext\fP, with a \fBstatus\fP action only entries that are new or changed are shown; with \fCmtime\fP,group only entries whose group or modification time has changed are printed.\fP .PP -By giving eg. the value \fCtext\fP, with a \fBstatus\fP action only entries that are new or changed are shown; with \fCmtime\fP,group only entries whose group or modification time has changed are printed. +\fC .PP \fBNote:\fP .RS 4 -The list does not include \fBpossibly\fP changed entries; see \fB-C -- checksum\fP \fC-C\fP. +Please see \fBChange detection\fP for some more information. .PP If an entry gets replaced with an entry of a different type (eg. a directory gets replaced by a file), that counts as \fCdeleted\fP \fBand\fP \fCnew\fP. .RE .PP -If you use \fC-v\fP, it's used as a \fCany\fP internally. +If you use \fC-v\fP, it's used as a \fCany\fP internally.\fP +.PP +\fC If you use the string \fCnone\fP, it resets the bitmask to \fBno\fP entries shown; then you can built a new mask. So \fCowner\fP,none,any,none,delete would show deleted entries. If the value after all commandline parsing is \fCnone\fP, it is reset to the default.\fP .PP -If you use the string \fCnone\fP, it resets the bitmask to \fBno\fP entries shown; then you can built a new mask. So \fCowner\fP,none,any,none,delete would show deleted entries. If the value after all commandline parsing is \fCnone\fP, it is reset to the default. .SS "-W warning=action -- set warnings" -Here you can define the behaviour for certain situations that should not normally happen, but which you might encounter. +\fC Here you can define the behaviour for certain situations that should not normally happen, but which you might encounter.\fP .PP -The general format here is \fIspecification\fP = \fIaction\fP, where \fIspecification\fP is a string matching the start of at least one of the defined situations, and \fIaction\fP is one of these: +\fC The general format here is \fIspecification\fP = \fIaction\fP, where \fIspecification\fP is a string matching the start of at least one of the defined situations, and \fIaction\fP is one of these: .IP "\(bu" 2 \fIonce\fP to print only a single warning, .IP "\(bu" 2 @@ -149,12 +162,13 @@ .IP "\(bu" 2 \fIcount\fP to just count the number of occurrences. .PP +\fP .PP -If \fIspecification\fP matches more than one situation, all of them are set; eg. for \fImeta=ignore\fP all of \fImeta-mtime\fP, \fImeta-user\fP etc. are ignored. +\fC If \fIspecification\fP matches more than one situation, all of them are set; eg. 
for \fImeta=ignore\fP all of \fImeta-mtime\fP, \fImeta-user\fP etc. are ignored.\fP .PP -If at least a single warning that is \fBnot\fP ignored is encountered during the program run, a list of warnings along with the number of messages it would have printed with the setting \fIalways\fP is displayed, to inform the user of possible problems. +\fC If at least a single warning that is \fBnot\fP ignored is encountered during the program run, a list of warnings along with the number of messages it would have printed with the setting \fIalways\fP is displayed, to inform the user of possible problems.\fP .PP -The following situations can be handled with this: \fImeta-mtime\fP, \fImeta-user\fP, \fImeta-group\fP, \fImeta-umask\fP These warnings are issued if a meta-data property that was fetched from the repository couldn't be parsed. This can only happen if some other program or a user changes properties on entries. +\fC The following situations can be handled with this: \fImeta-mtime\fP, \fImeta-user\fP, \fImeta-group\fP, \fImeta-umask\fP These warnings are issued if a meta-data property that was fetched from the repository couldn't be parsed. This can only happen if some other program or a user changes properties on entries. .br In this case you can use \fC-Wmeta=always\fP or \fC-Wmeta=count\fP, until the repository is clean again. .PP @@ -176,38 +190,59 @@ .PP \fIdiff-status\fP GNU diff has defined that it returns an exit code 2 in case of an error; sadly it returns that also for binary files, so that a simply \fCfsvs diff some-binary-file text-file\fP would abort without printing the diff for the second file. So the exit status of diff is per default ignored, but can be used by setting this option to eg. \fIstop\fP. .PP +\fP +.PP +\fC Also an environment variable \fCFSVS_WARNINGS\fP is used and parsed; it is simply a whitespace-separated list of option specifications.\fP .PP -Also an environment variable \fCFSVS_WARNINGS\fP is used and parsed; it is simply a whitespace-separated list of option specifications. -.SS "-u URLname[@revision] -- select URLs" -Some commands' operations can be reduced to a subset of defined URLs; the \fBupdate\fP command is the best example. +.SS "-u URLname[@revision[:revision]] -- select URLs" +\fC Some commands can be reduced to a subset of defined URLs; the \fBupdate\fP command is a example.\fP .PP -If you have more than a single URL in use for your working copy, and \fCupdate\fP updates \fBall\fP entries from \fBall\fP URLs. By using this parameter you can tell FSVS to update only a single URL. +\fC If you have more than a single URL in use for your working copy, \fCupdate\fP normally updates \fBall\fP entries from \fBall\fP URLs. By using this parameter you can tell FSVS to update only the specified URLs.\fP .PP -The parameter can be used repeatedly; the value can have multiple URLs, separated by whitespace or one of \fC',;'\fP. +\fC The parameter can be used repeatedly; the value can have multiple URLs, separated by whitespace or one of \fC',;'\fP.\fP .PP +\fC .PP .nf fsvs up -u base_install,boot@32 -u gcc + .fi .PP - This would get \fCHEAD\fP of \fCbase_install\fP and \fCgcc\fP, and set the target revision of the \fCboot\fP URL at 32. +\fP +.PP +\fC This would get \fCHEAD\fP of \fCbase_install\fP and \fCgcc\fP, and set the target revision of the \fCboot\fP URL at 32.\fP +.PP +\fC +.PP +\fBNote:\fP +.RS 4 +The second revision specification will be used for eg. the \fBdiff\fP command; but this is not yet implemented. 
+.RE +.PP +\fP .SS "-o [name[=value]] -- other options" -This is used for setting some seldom used option, for which default can be set in a configuration file (to be implemented, currently only command-line). +\fC This is used for setting some seldom used option, for which default can be set in a configuration file (to be implemented, currently only command-line).\fP +.PP +\fC For a list of these please see \fBFurther options for FSVS.\fP.\fP .PP -For a list of these please see \fBFurther options for FSVS.\fP. +\fC\fP .SH "add" .PP +\fC .PP .nf fsvs add PATH [PATH...] + .fi .PP +\fP +.PP +\fC With this command you can explicitly define entries to be versioned, even if they have a matching ignore pattern. They will be sent to the repository on the next commit, just like other new entries, and will therefore be reported as \fINew\fP .\fP .PP -With this command you can explicitly define entries to be versioned, even if they have a matching ignore pattern. They will be sent to the repository on the next commit, just like other new entries, and will therefore be reported as \fINew\fP . .SS "Example" -Say, you're versioning your home directory, and gave an ignore pattern of \fC./.*\fP to ignore all \fC.*\fP entries in your home-directory. Now you want \fC.bashrc\fP, \fC.ssh/config\fP, and your complete \fC.kde3-tree\fP saved, just like other data. +\fC Say, you're versioning your home directory, and gave an ignore pattern of \fC./.*\fP to ignore all \fC.*\fP entries in your home-directory. Now you want \fC.bashrc\fP, \fC.ssh/config\fP, and your complete \fC.kde3-tree\fP saved, just like other data.\fP .PP -So you tell fsvs to not ignore these entries: +\fC So you tell fsvs to not ignore these entries: .PP .nf fsvs add .bashrc .ssh/config .kde3 @@ -221,103 +256,160 @@ .fi .PP - Now a \fCfsvs st\fP would show your entries as \fINew\fP , and the next commit will send them to the repository. + Now a \fCfsvs st\fP would show your entries as \fINew\fP , and the next commit will send them to the repository.\fP +.PP +\fC .PP \fBNote:\fP .RS 4 This loads the wc data, writes the given paths with some flags to it, and saves the wc data again. .RE .PP +\fP .SH "unversion" .PP +\fC .PP .nf fsvs unversion PATH [PATH...] + .fi .PP +\fP +.PP +\fC This command flags the given paths locally as removed. On the next commit they will be deleted in the repository, and the local information of them will be removed, but not the entries themselves. So they will show up as \fINew\fP again, and you get another chance at ignoring them.\fP .PP -This command flags the given paths locally as removed. On the next commit they will be deleted in the repository, and the local information of them will be removed, but not the entries themselves. So they will show up as \fINew\fP again, and you get another chance at ignoring them. .SS "Example" -Say, you're versioning your home directory, and found that you no longer want \fC.bash_history\fP and \fC.sh_history\fP versioned. So you do +\fC Say, you're versioning your home directory, and found that you no longer want \fC.bash_history\fP and \fC.sh_history\fP versioned. So you do .PP .nf fsvs unversion .bash_history .sh_history .fi .PP - and these files will be reported as \fCd\fP (will be deleted, but only in the repository). 
+ and these files will be reported as \fCd\fP (will be deleted, but only in the repository).\fP .PP -Then you do a +\fC Then you do a .PP .nf fsvs commit .fi .PP +\fP .PP -Now fsvs would report these files as \fCNew\fP , as it does no longer know anything about them; but that can be cured by +\fC Now fsvs would report these files as \fCNew\fP , as it does no longer know anything about them; but that can be cured by .PP .nf fsvs ignore './.*sh_history' .fi .PP - Now these two files won't be shown as \fINew\fP , either. + Now these two files won't be shown as \fINew\fP , either.\fP .PP -The example also shows why the given paths are not just entered as separate ignore patterns - they are just single cases of a (probably) much broader pattern. +\fC The example also shows why the given paths are not just entered as separate ignore patterns - they are just single cases of a (probably) much broader pattern.\fP +.PP +\fC .PP \fBNote:\fP .RS 4 If you didn't use some kind of escaping for the pattern, the shell would expand it to the actual filenames, which is (normally) not what you want. .RE .PP +\fP .SH "_build_new_list" .PP -This is used mainly for debugging. It traverses the filesystem and build a new entries file. In production it should not be used - as the revision of the entries is unknown, we can only use 0 - and loose information this way! +\fC This is used mainly for debugging. It traverses the filesystem and build a new entries file. In production it should not be used - as the revision of the entries is unknown, we can only use 0 - and loose information this way!\fP +.PP +\fC\fP +.SH "delay" +.PP +\fC This command delays execution until the time has passed at least to the next second after writing the \fBdir\fP and \fBurls\fP files. So, where previously the \fBdelay\fP option was used, this can be substituted by the given command followed by the \fCdelay\fP command.\fP +.PP +\fC The advantage is over the \fBWaiting for a time change after working copy operations\fP option is, that read-only commands can be used in the meantime.\fP +.PP +\fC An example: +.PP +.nf + fsvs commit /etc/X11 -m 'Backup of X11' + ... read-only commands, like 'status' + fsvs delay /etc/X11 + ... read-write commands, like 'commit' + +.fi +.PP +\fP +.PP +\fC In the testing framework it is used to save a bit of time; in normal operation, where \fCfsvs\fP commands are not so tightly packed, it is normally preferable to use the \fBdelay\fP option.\fP +.PP +\fC\fP +.SH "cat" +.PP +\fC +.PP +.nf + fsvs cat [-r rev] path + +.fi +.PP +\fP +.PP +\fC Fetches a file with the specified revision or, if not given, BASE, from the repository, and outputs it to \fCSTDOUT\fP.\fP +.PP +\fC\fP .SH "checkout" .PP +\fC .PP .nf fsvs checkout [path] URL [URLs...] + .fi .PP +\fP .PP -Sets one or more URLs for the current working directory (or the directory \fCpath\fP), and does an \fBcheckout\fP of these URLs. +\fC Sets one or more URLs for the current working directory (or the directory \fCpath\fP), and does an \fBcheckout\fP of these URLs.\fP .PP -Example: +\fC Example: .PP .nf fsvs checkout . http://svn/repos/installation/machine-1/trunk .fi .PP +\fP .PP -The distinction whether a directory is given or not is done based on the result of URL-parsing -- if it looks like an URL, it is used as an URL. +\fC The distinction whether a directory is given or not is done based on the result of URL-parsing -- if it looks like an URL, it is used as an URL. 
.br - Please mind that at most a single path is allowed; as soon as two non-URLs are found an error message is printed. + Please mind that at most a single path is allowed; as soon as two non-URLs are found an error message is printed.\fP .PP -If no directory is given, \fC\fP. is used; this differs from the usual subversion usage, but might be better suited for usage as a recovery tool (where versioning \fC/\fP is common). Opinions welcome. +\fC If no directory is given, \fC\fP. is used; this differs from the usual subversion usage, but might be better suited for usage as a recovery tool (where versioning \fC/\fP is common). Opinions welcome.\fP .PP -The given \fCpath\fP must exist, and \fBshould\fP be empty -- \fCfsvs\fP will abort on conflicts, ie. if files that should be created already exist. +\fC The given \fCpath\fP must exist, and \fBshould\fP be empty -- \fCfsvs\fP will abort on conflicts, ie. if files that should be created already exist. .br - If there's a need to create that directory, please say so; patches for some parameter like \fC-p\fP are welcome. + If there's a need to create that directory, please say so; patches for some parameter like \fC-p\fP are welcome.\fP +.PP +\fC For a format definition of the URLs please see the chapter \fBFormat of URLs\fP and the \fBurls\fP and \fBupdate\fP commands.\fP .PP -For a format definition of the URLs please see the chapter \fBFormat of URLs\fP and the \fBurls\fP and \fBupdate\fP commands. +\fC Furthermore you might be interested in \fBUsing an alternate root directory\fP and \fBRecovery for a non-booting system\fP.\fP .PP -Furthermore you might be interested in \fBUsing an alternate root directory\fP and \fBRecovery for a non-booting system\fP. +\fC\fP .SH "commit" .PP +\fC .PP .nf fsvs commit [-m 'message'|-F filename] [-v] [-C [-C]] [PATH [PATH ...]] + .fi .PP +\fP .PP -Commits the current state into the repository. It is possible to commit only parts of a working copy into the repository. +\fC Commits the current state into the repository. It is possible to commit only parts of a working copy into the repository.\fP .PP -Your working copy is \fC/etc\fP , and you've set it up and committed already. Now you've changed \fC/etc/hosts\fP , and \fC/etc/inittab\fP . Since these are non-related changes, you'd like them to be in separate commits. +\fC Your working copy is \fC/etc\fP , and you've set it up and committed already. Now you've changed \fC/etc/hosts\fP , and \fC/etc/inittab\fP . Since these are non-related changes, you'd like them to be in separate commits.\fP .PP -So you simply run these commands: +\fC So you simply run these commands: .PP .nf fsvs commit -m 'Added some host' /etc/hosts @@ -325,44 +417,49 @@ .fi .PP +\fP .PP -If you're currently in \fC/etc\fP , you can even drop the \fC/etc/\fP in front, and just use the filenames. -.PP -This extended path handling on the commandline is not yet available for every command. Most of them still expect you to be in the working copy root. +\fC If you're currently in \fC/etc\fP , you can even drop the \fC/etc/\fP in front, and just use the filenames.\fP .PP -Please see \fBstatus\fP for explanations on \fC-v\fP and \fC-C\fP . For advanced backup usage see also \fBFSVS_PROP_COMMIT_PIPE\fP. +\fC Please see \fBstatus\fP for explanations on \fC-v\fP and \fC-C\fP . 
For advanced backup usage see also \fBFSVS_PROP_COMMIT_PIPE\fP.\fP .PP - .SH "cp" .PP +\fC .PP .nf fsvs cp [-r rev] SRC DEST fsvs cp dump fsvs cp load + .fi .PP +\fP .PP -This command marks \fCDEST\fP as a copy of \fCSRC\fP at revision \fCrev\fP, so that on the next commit of \fCDEST\fP the corresponding source path is sent as copy source. +\fC The \fCcopy\fP command marks \fCDEST\fP as a copy of \fCSRC\fP at revision \fCrev\fP, so that on the next commit of \fCDEST\fP the corresponding source path is sent as copy source.\fP .PP -The default value for \fCrev\fP is \fCBASE\fP, ie. the revision the \fCSRC\fP (locally) is at. +\fC The default value for \fCrev\fP is \fCBASE\fP, ie. the revision the \fCSRC\fP (locally) is at.\fP .PP -Please note that this command works \fBalways\fP on a directory \fBstructure\fP - if you say to copy a directory, the \fBwhole\fP structure is marked as copy. That means that if some entries below the copy are missing, they are reported as removed from the copy on the next commit. +\fC Please note that this command works \fBalways\fP on a directory \fBstructure\fP - if you say to copy a directory, the \fBwhole\fP structure is marked as copy. That means that if some entries below the copy are missing, they are reported as removed from the copy on the next commit. .br - (Of course it is possible to mark files as copied, too; non-recursive copies are not possible.) + (Of course it is possible to mark files as copied, too; non-recursive copies are not possible.)\fP +.PP +\fC .PP \fBNote:\fP .RS 4 Or TODO: There will be differences in the exact usage - \fCcopy\fP will try to run the \fCcp\fP command, whereas \fCcopied\fP will just remember the relation. .RE .PP -If this command are used without parameters, the currently defined relations are printed; please keep in mind that the \fBkey\fP is the destination name, ie. the 2nd line of each pair! +If this command are used without parameters, the currently defined relations are printed; please keep in mind that the \fBkey\fP is the destination name, ie. the 2nd line of each pair!\fP .PP -The input format for \fCload\fP is newline-separated - first a \fCSRC\fP line, followed by a \fCDEST\fP line, then an line with just a dot (\fC'.'\fP) as delimiter. If you've got filenames with newlines or other special characters, you have to give the paths as arguments. +\fC The input format for \fCload\fP is newline-separated - first a \fCSRC\fP line, followed by a \fCDEST\fP line, then an line with just a dot (\fC'.'\fP) as delimiter. If you've got filenames with newlines or other special characters, you have to give the paths as arguments.\fP .PP -Internally the paths are stored relative to the working copy base directory, and they're printed that way, too. +\fC Internally the paths are stored relative to the working copy base directory, and they're printed that way, too.\fP .PP -Later definitions are \fBappended\fP to the internal database; to undo mistakes, use the \fBrevert\fP action. +\fC Later definitions are \fBappended\fP to the internal database; to undo mistakes, use the \fBrevert\fP action.\fP +.PP +\fC .PP \fBNote:\fP .RS 4 @@ -371,7 +468,9 @@ As subversion currently treats a rename as copy+delete, the \fBmv\fP command is an alias to \fBcp\fP. .RE .PP -If you have a need to give the filenames \fCdump\fP or \fCload\fP as first parameter for copyfrom relations, give some path, too, as in \fC./dump\fP. 
+If you have a need to give the filenames \fCdump\fP or \fCload\fP as first parameter for copyfrom relations, give some path, too, as in \fC./dump\fP.\fP +.PP +\fC .PP \fBNote:\fP .RS 4 @@ -390,25 +489,29 @@ But it is not implementd to give an URL as copyfrom source directly - we'd have to fetch a list (and possibly the data!) from the repository. .RE .PP +\fP .SH "copyfrom-detect" .PP +\fC .PP .nf fsvs copyfrom-detect [paths...] + .fi .PP +\fP .PP -This command tells \fCfsvs\fP to look through the new entries, and see whether it can find some that seem to be copied from others already known. +\fC This command tells \fCfsvs\fP to look through the new entries, and see whether it can find some that seem to be copied from others already known. .br - It will output a list with source and destination path and why it could match. + It will output a list with source and destination path and why it could match.\fP .PP -This is just for information purposes and doesn't change any FSVS state, \fIunless some option/parameter is set. (TODO)\fP +\fC This is just for information purposes and doesn't change any FSVS state, \fIunless some option/parameter is set. (TODO)\fP\fP .PP -The list format is \fBon purpose\fP incompatible with the \fCload\fP syntax, as the best match normally has to be taken manually. +\fC The list format is \fBon purpose\fP incompatible with the \fCload\fP syntax, as the best match normally has to be taken manually.\fP .PP -If \fBverbose\fP is used, an additional value giving the percentage of matching blocks, and the count of possibly copied entries is printed. +\fC If \fBverbose\fP is used, an additional value giving the percentage of matching blocks, and the count of possibly copied entries is printed.\fP .PP -Example: +\fC Example: .PP .nf $ fsvs copyfrom-list -v @@ -428,8 +531,9 @@ .fi .PP +\fP .PP -The abbreviations are: \fImd5\fP The \fBMD5\fP of the new file is identical to that of one or more already committed files; there is no percentage. +\fC The abbreviations are: \fImd5\fP The \fBMD5\fP of the new file is identical to that of one or more already committed files; there is no percentage. .PP \fIinode\fP The \fBdevice/inode\fP number is identical to the given known entry; this could mean that the old entry has been renamed or hardlinked. \fBNote:\fP Not all filesystems have persistent inode numbers (eg. NFS) - so depending on your filesystems this might not be a good indicator! .PP @@ -441,6 +545,9 @@ .br The percentage is (number_of_common_entries)/(files_in_dir1 + files_in_dir2 - number_of_common_entries). .PP +\fP +.PP +\fC .PP \fBNote:\fP .RS 4 @@ -449,67 +556,115 @@ If too many possible matches are found, not all may be printed; only the indicator \fC...\fP is shown at the end. .RE .PP +\fP +.SH "uncp" +.PP +\fC +.PP +.nf + fsvs uncopy DEST [DEST ...] + +.fi +.PP +\fP +.PP +\fC The \fCuncopy\fP command removes a \fCcopyfrom\fP mark from the destination entry. This will make the entry unknown again, and reported as \fCNew\fP on the next invocations.\fP +.PP +\fC Only the base of a copy can be un-copied; if a directory structure was copied, and the given entry is just implicitly copied, this command will give you an error.\fP +.PP +\fC This is not folded in \fBrevert\fP, because it's not clear whether \fCrevert\fP should restore the original copyfrom data or remove the copy attribute; by using a special command this is no longer ambiguous.\fP +.PP +\fC Example: +.PP +.nf + $ fsvs copy SourceFile DestFile + # Whoops, was wrong! 
+ $ fsvs uncopy DestFile + +.fi +.PP +\fP +.PP +\fC\fP .SH "diff" .PP +\fC .PP .nf fsvs diff [-v] [-r rev[:rev2]] [-R] PATH [PATH...] + .fi .PP +\fP .PP -This command gives you diffs between local and repository files. +\fC This command gives you diffs between local and repository files.\fP .PP -With \fC-v\fP the meta-data is additionally printed, and changes shown. +\fC With \fC-v\fP the meta-data is additionally printed, and changes shown.\fP .PP -If you don't give the revision arguments, you get a diff of the base revision in the repository (the last commit) against your current local file. With one revision, you diff this repository version against you local file. With both revisions given, the difference between these repository versions is calculated. +\fC If you don't give the revision arguments, you get a diff of the base revision in the repository (the last commit) against your current local file. With one revision, you diff this repository version against you local file. With both revisions given, the difference between these repository versions is calculated.\fP .PP -You'll need the \fCdiff\fP program, as the files are simply passed as parameters to it. +\fC You'll need the \fCdiff\fP program, as the files are simply passed as parameters to it.\fP .PP -The default is to do non-recursive diffs; so \fCfsvs diff .\fP will output the changes in all files \fBin the current directory\fP. +\fC The default is to do non-recursive diffs; so \fCfsvs diff .\fP will output the changes in all files \fBin the current directory\fP.\fP .PP -The output for non-files is not defined. +\fC The output for non-files is not defined.\fP .PP -For entries marked as copy the diff against the (clean) source entry is printed. +\fC For entries marked as copy the diff against the (clean) source entry is printed.\fP .PP -Please see also \fBOptions relating to the 'diff' action\fP and \fBUsing colordiff\fP. +\fC Please see also \fBOptions relating to the 'diff' action\fP and \fBUsing colordiff\fP.\fP +.PP +\fC \fP .SH "export" .PP +\fC .PP .nf fsvs export REPOS_URL [-r rev] + .fi .PP +\fP +.PP +\fC If you want to export a directory from your repository \fBwithout\fP having to have an WAA-area, you can use this command. This restores all meta-data - owner, group, access mask and modification time. Its primary use is for data recovery.\fP .PP -If you want to export a directory from your repository \fBwithout\fP having to have an WAA-area, you can use this command. This restores all meta-data - owner, group, access mask and modification time. Its primary use is for data recovery. +\fC The data gets written (in the correct directory structure) below the current working directory; if entries already exist, the export will stop, so this should be an empty directory.\fP .PP -The data gets written (in the correct directory structure) below the current working directory; if entries already exist, the export will stop, so this should be an empty directory. +\fC\fP .SH "help" .PP +\fC .PP .nf help [command] + .fi .PP +\fP +.PP +\fC This command shows general or specific \fBhelp\fP (for the given command). A similar function is available by using \fC-h\fP or \fC-\fP? after a command.\fP .PP -This command shows general or specific \fBhelp\fP (for the given command). A similar function is available by using \fC-h\fP or \fC-\fP? after a command. +\fC\fP .SH "ignore" .PP +\fC .PP .nf - fsvs ignore [prepend|append|at=n] pattern[s] fsvs ignore dump|load + fsvs ignore [prepend|append|at=n] pattern [pattern ...] 
+ .fi .PP +\fP .PP -This command adds patterns to the end of the ignore list, or, with \fIprepend\fP , puts them at the beginning of the list. With \fCat=x\fP the patterns are inserted at the position \fCx\fP , counting from 0. +\fC This command adds patterns to the end of the ignore list, or, with \fIprepend\fP , puts them at the beginning of the list. With \fCat=x\fP the patterns are inserted at the position \fCx\fP , counting from 0.\fP .PP -\fCfsvs dump\fP prints the patterns to \fCSTDOUT\fP . If there are special characters like \fCCR\fP or \fCLF\fP embedded in the pattern \fBwithout encoding\fP (like \fC\\r\fP or \fC\\n\fP), the output will be garbled. +\fC \fCfsvs dump\fP prints the patterns to \fCSTDOUT\fP . If there are special characters like \fCCR\fP or \fCLF\fP embedded in the pattern \fBwithout encoding\fP (like \fC\\r\fP or \fC\\n\fP), the output will be garbled.\fP .PP -The patterns may include \fC*\fP and \fC\fP? as wildcards in one directory level, or \fC**\fP for arbitrary strings. +\fC The patterns may include \fC*\fP and \fC\fP? as wildcards in one directory level, or \fC**\fP for arbitrary strings.\fP .PP -These patterns are only matched against new files; entries that are already versioned are not invalidated. If the given path matches a new directory, entries below aren't found, either; but if this directory or entries below are already versioned, the pattern doesn't work, as the match is restricted to the directory. +\fC These patterns are only matched against new files; entries that are already versioned are not invalidated. If the given path matches a new directory, entries below aren't found, either; but if this directory or entries below are already versioned, the pattern doesn't work, as the match is restricted to the directory.\fP .PP -So: +\fC So: .PP .nf fsvs ignore ./tmp @@ -519,50 +674,95 @@ ignores the directory \fCtmp\fP; but if it has already been committed, existing entries would have to be unmarked with \fBfsvs unversion\fP. Normally it's better to use .PP .nf - fsvs ignore ./tmp/§** + fsvs ignore ./tmp/** .fi .PP - as that takes the directory itself (which might be needed after restore as a mount point), but ignore \fBall\fP entries below. + as that takes the directory itself (which might be needed after restore as a mount point), but ignore \fBall\fP entries below.\fP .PP -Other special variants are available, see the documentation \fBUsing ignore patterns\fP . +\fC Other special variants are available, see the documentation \fBUsing ignore patterns\fP .\fP .PP -Examples: +\fC Examples: .PP .nf fsvs ignore ./proc fsvs ignore ./dev/pts - fsvs ignore './var/log/§*-*' - fsvs ignore './§**~' - fsvs ignore './§**§/§*.bak' - fsvs ignore prepend 't./§**.txt' - fsvs ignore append 't./§**.svg' - fsvs ignore at=1 './§**.tmp' + fsvs ignore './var/log/*-*' + fsvs ignore './**~' + fsvs ignore './**/*.bak' + fsvs ignore prepend 't./**.txt' + fsvs ignore append 't./**.svg' + fsvs ignore at=1 './**.tmp' fsvs ignore dump fsvs ignore dump -v - echo './§**.doc' | fsvs ignore load + echo './**.doc' | fsvs ignore load .fi .PP +\fP +.PP +\fC .PP \fBNote:\fP .RS 4 Please take care that your wildcard patterns are not expanded by the shell! .RE .PP +\fP +.SH "rign" +.PP +\fC +.PP +.nf + fsvs rel-ignore [prepend|append|at=n] path-spec [path-spec ...] + fsvs ri [prepend|append|at=n] path-spec [path-spec ...] 
+ +.fi +.PP +\fP +.PP +\fC If you use more than a single working copy for the same data, it will be stored in different paths - and that makes absolute ignore patterns infeasible. But relative ignore patterns are anchored at the beginning of the WC root - which is a bit tiring if you're deep in your WC hierarchy and want to ignore some files.\fP +.PP +\fC To make that easier you can use the \fCrel-ignore\fP (abbreviated as \fCri\fP) command; this converts all given path-specifications (that may include wildcards as per the shell pattern specification above) to WC-relative values before storing them.\fP +.PP +\fC Example for \fC/etc\fP as working copy root: +.PP +.nf + fsvs rel-ignore '/etc/X11/xorg.conf.*' + + cd /etc/X11 + fsvs rel-ignore 'xorg.conf.*' + +.fi +.PP + Both commands would store the pattern './X11/xorg.conf.*'.\fP +.PP +\fC +.PP +\fBNote:\fP +.RS 4 +This works only for \fBshell patterns\fP. +.RE +.PP +For more details about ignoring files please see the \fBignore\fP command and \fBUsing ignore patterns\fP.\fP +.PP +\fC\fP .SH "info" .PP +\fC .PP .nf fsvs info [-R [-R]] [PATH...] + .fi .PP +\fP .PP -Use this command to show information regarding one or more entries in your working copy. Currently you must be at the working copy root; but that will change. You can use \fC-v\fP to obtain slightly more information. +\fC Use this command to show information regarding one or more entries in your working copy. Currently you must be at the working copy root; but that will change. You can use \fC-v\fP to obtain slightly more information.\fP .PP -This may sometimes be helpful for locating bugs, or to obtain the URL and revision a working copy is currently at. +\fC This may sometimes be helpful for locating bugs, or to obtain the URL and revision a working copy is currently at.\fP .PP -Example: +\fC Example: .PP .nf $ fsvs info @@ -582,45 +782,55 @@ .fi .PP +\fP .PP -The default is to print information about the given entry only. With a single \fC-R\fP you'll get this data about \fBall\fP entries of a given directory; with a second \fC-R\fP you'll get the whole (sub-)tree. +\fC The default is to print information about the given entry only. With a single \fC-R\fP you'll get this data about \fBall\fP entries of a given directory; with a second \fC-R\fP you'll get the whole (sub-)tree.\fP +.PP +\fC\fP .SH "log" .PP +\fC .PP .nf fsvs log [-v] [-r rev1[:rev2]] [path] + .fi .PP +\fP .PP -This command views the log information associated with the given \fIpath\fP, or, if none, the highest priority URL. +\fC This command views the log information associated with the given \fIpath\fP, or, if none, the highest priority URL.\fP .PP -The optional \fIrev1\fP and \fIrev2\fP can be used to restrict the revisions that are shown; if no values are given, the logs are given starting from HEAD downwards. +\fC The optional \fIrev1\fP and \fIrev2\fP can be used to restrict the revisions that are shown; if no values are given, the logs are given starting from \fCHEAD\fP downwards, and then a limit on the number of revisions is applied (but see the \fBlimit\fP option).\fP .PP -If you use the \fB-v\fP -option, you get the files changed in each revision printed, too. +\fC If you use the \fB-v\fP -option, you get the files changed in each revision printed, too.\fP .PP -Currently at most 100 log messages are shown. +\fC There is an option controlling the output format; see \fB'fsvs log' output format\fP.\fP .PP -There is an option controlling the output format; see \fB'fsvs log' output format\fP. 
-.PP -TODOs: +\fC TODOs: .IP "\(bu" 2 \fC--stop-on-copy\fP .IP "\(bu" 2 Show revision for \fBall\fP URLs associated with a working copy? In which order? .IP "\(bu" 2 A URL-parameter, to specify the log URL. (Name) -.IP "\(bu" 2 -Limit number of revisions shown? .PP +\fP +.PP +\fC\fP .SH "prop-get" .PP +\fC .PP .nf fsvs prop-get PROPERTY-NAME PATH... + .fi .PP +\fP .PP -You get the data of the property printed to STDOUT. +\fC You get the data of the property printed to STDOUT.\fP +.PP +\fC .PP \fBNote:\fP .RS 4 @@ -629,74 +839,104 @@ If you want a safe way to look at the properties, use prop-list with the \fC-v\fP parameter. .RE .PP +\fP .SH "prop-set" .PP +\fC .PP .nf fsvs prop-set PROPERTY-NAME VALUE PATH... + .fi .PP +\fP +.PP +\fC This command sets an arbitrary property value for the given path(s).\fP .PP -This command sets an arbitrary property value for the given path(s). +\fC .PP \fBNote:\fP .RS 4 Some property prefixes are reserved; currently everything starting with \fCsvn:\fP throws a (fatal) warning, and \fCfsvs:\fP is already used, too. See \fBSpecial property names\fP. .RE .PP +\fP .SH "prop-del" .PP +\fC .PP .nf fsvs prop-del PROPERTY-NAME PATH... + .fi .PP +\fP +.PP +\fC This command removes property value for the given path(s).\fP .PP -This command removes property value for the given path(s). +\fC See also \fBprop-set\fP.\fP .PP -See also \fBprop-set\fP +\fC\fP .SH "prop-list" .PP +\fC .PP .nf fsvs prop-list [-v] PATH... + .fi .PP +\fP +.PP +\fC Lists the names of all properties for the given entry. With \fC-v\fP, the value is printed as well; special characters will be translated, to not mess with your terminal.\fP .PP -Lists the names of all properties for the given entry. With \fC-v\fP, the value is printed as well; special characters will be translated, to not mess with your terminal. +\fC If you need raw output, post a patch for \fC--raw\fP, or loop with \fBprop-get\fP.\fP .PP -If you need raw output, post a patch for \fC--raw\fP, or loop with \fBprop-get\fP. +\fC\fP .SH "remote-status" .PP +\fC .PP .nf fsvs remote-status PATH [-r rev] + .fi .PP +\fP +.PP +\fC This command looks into the repository and tells you which files would get changed on an \fBupdate\fP - it's a dry-run for \fBupdate\fP .\fP .PP -This command looks into the repository and tells you which files would get changed on an \fBupdate\fP - it's a dry-run for \fBupdate\fP . +\fC Per default it compares to \fCHEAD\fP, but you can choose another revision with the \fC-r\fP parameter.\fP .PP -Per default it compares to \fCHEAD\fP, but you can choose another revision with the \fC-r\fP parameter. +\fC\fP .SH "resolve" .PP +\fC .PP .nf fsvs resolve PATH [PATH...] + .fi .PP +\fP +.PP +\fC When FSVS tries to update local files which have been changed, a conflict might occur. (For various ways of handling these please see the \fBconflict\fP option.)\fP .PP -When FSVS tries to update local files which have been changed, a conflict might occur. (For various ways of handling these please see the \fBconflict\fP option.) +\fC This command lets you mark such conflicts as resolved.\fP .PP -This command lets you mark such conflicts as resolved. +\fC\fP .SH "revert" .PP +\fC .PP .nf fsvs revert [-rRev] [-R] PATH [PATH...] + .fi .PP +\fP .PP -This command undoes local modifications: +\fC This command undoes local modifications: .IP "\(bu" 2 An entry that is marked to be unversioned gets this flag removed. 
.IP "\(bu" 2 @@ -704,58 +944,67 @@ .IP "\(bu" 2 An entry that is a copy destination, but modified, gets reverted to the copy source data. .IP "\(bu" 2 -An unmodified direct copy destination entry, and other uncommitted entries with special flags (manually added, or defined as copied), are changed back to 'N'ew -- the copy definition and the special status is removed. +An unmodified direct copy destination entry, and other uncommitted entries with special flags (manually added, or defined as copied), are changed back to \fI'N'\fPew -- the copy definition and the special status is removed. .br Please note that on implicitly copied entries (entries that are marked as copied because some parent directory is the base of a copy) \fBcannot\fP be un-copied; they can only be reverted to their original (copied-from) data, or removed. .PP +\fP .PP -See also \fBHOWTO: Understand the entries' statii\fP. +\fC See also \fBHOWTO: Understand the entries' statii\fP.\fP .PP -If a directory is given on the command line \fBall known entries in this directory\fP are reverted to the old state; this behaviour can be modified with \fB-R/-N\fP, or see below. +\fC If a directory is given on the command line \fBall known entries in this directory\fP are reverted to the old state; this behaviour can be modified with \fB-R/-N\fP, or see below.\fP .PP -The reverted entries are printed, along with the status they had \fBbefore\fP the revert (because the new status is per definition \fIunchanged\fP). +\fC The reverted entries are printed, along with the status they had \fBbefore\fP the revert (because the new status is per definition \fIunchanged\fP).\fP .PP -If a revision is given, the entries' data is taken from this revision; furthermore, the \fBnew\fP status of that entry is shown. +\fC If a revision is given, the entries' data is taken from this revision; furthermore, the \fBnew\fP status of that entry is shown. .PP \fBNote:\fP .RS 4 Please note that mixed revision working copies are not possible; the \fIBASE\fP revision is not changed, and a simple \fCrevert\fP without a revision arguments gives you that. .RE .PP +\fP .SS "Difference to update" -If you find that something doesn't work as it should, you can revert entries until you are satisfied, and directly \fBcommit\fP the new state. +\fC If you find that something doesn't work as it should, you can revert entries until you are satisfied, and directly \fBcommit\fP the new state.\fP .PP -In contrast, if you \fBupdate\fP to an older version, you +\fC In contrast, if you \fBupdate\fP to an older version, you .IP "\(bu" 2 cannot choose single entries (no mixed revision working copies), .IP "\(bu" 2 and you cannot commit the old version with changes, as later changes will create conflicts in the repository. .PP +\fP +.PP .SS "Currently only known entries are handled." -If you need a switch (like \fC--delete\fP in \fCrsync(1)\fP ) to remove unknown (new, not yet versioned) entries, to get the directory in the exact state it is in the repository, say so. +\fC If you need a switch (like \fC--delete\fP in \fCrsync(1)\fP ) to remove unknown (new, not yet versioned) entries, to get the directory in the exact state it is in the repository, say so.\fP +.PP +\fC \fP .SS "If a path is specified whose parent is missing, \\c" -fsvs complains. We plan to provide a switch (probably \fC-p\fP), which would create (a sparse) tree up to this entry. +\fC fsvs complains. 
We plan to provide a switch (probably \fC-p\fP), which would create (a sparse) tree up to this entry.\fP +.PP .SS "Recursive behaviour" -When the user specifies a non-directory entry (file, device, symlink), this entry is reverted to the old state. This is the easy case. +\fC When the user specifies a non-directory entry (file, device, symlink), this entry is reverted to the old state. This is the easy case.\fP .PP -If the user specifies a directory entry, see this table for the restoration results: command line switchresult \fC-N\fP this directory only (meta-data), none this directory, and direct children of the directory, \fC-R\fP this directory, and the complete tree below. -.SS "Working with copied entries" -If an entry is marked as copied from another entry (and not committed!), a \fCrevert\fP will undo the copy setting - which will make the entry unknown again, and reported as new on the next invocations. +\fC If the user specifies a directory entry, see this table for the restoration results: command line switchresult \fC-N\fP this directory only (meta-data), none this directory, and direct children of the directory, \fC-R\fP this directory, and the complete tree below. \fP .PP -If a directory structure was copied, and the current entry is just a implicitly copied entry, \fCrevert\fP would take the copy source as reference, and \fBget the file data\fP from there. +.SS "Working with copied entries" +\fC If an entry is marked as copied from another entry (and not committed!), a \fCrevert\fP will fetch the original copyfrom source. To undo the copy setting use the \fBuncp\fP command.\fP .PP -Summary: \fIOnly the base of a copy can be un-copied.\fP +\fC\fP .SH "status" .PP +\fC .PP .nf fsvs status [-C [-C]] [-v] [PATHs...] + .fi .PP +\fP .PP -This command shows the entries that have changed since the last commit. +\fC This command shows the entries that have changed since the last commit.\fP .PP -The output is formatted as follows: +\fC The output is formatted as follows: .IP "\(bu" 2 A status columns of four (or, with \fC-v\fP , five) characters. There are either flags or a '.' printed, so that it's easily parsed by scripts -- the number of columns is only changed by \fB-q, -v -- verbose/quiet\fP. .IP "\(bu" 2 @@ -763,24 +1012,25 @@ .IP "\(bu" 2 The path and name of the entry, formatted by the option \fBDisplaying paths\fP. .PP +\fP .PP -The status column can show the following flags: +\fC The status column can show the following flags: .IP "\(bu" 2 Normally only changed entries are printed; with -v all are printed. The command line option \fC-v\fP additionally causes the \fC'm'\fP -flag to be split into two, see below. .IP "\(bu" 2 -\fC'D'\fP and \fC'N'\fP are used for \fIdeleted\fP and \fInew\fP entries. +\fC 'D'\fP and \fC'N'\fP are used for \fIdeleted\fP and \fInew\fP entries. .IP "\(bu" 2 -\fC'd'\fP and \fC'n'\fP are used for entries which are to be unversioned or added on the next commit; the characters were chosen as \fIlittle delete\fP (only in the repository, not removed locally) and \fIlittle new\fP (although \fBignored\fP). See \fBadd\fP and \fBunversion\fP. +\fC 'd'\fP and \fC'n'\fP are used for entries which are to be unversioned or added on the next commit; the characters were chosen as \fIlittle delete\fP (only in the repository, not removed locally) and \fIlittle new\fP (although \fBignored\fP). See \fBadd\fP and \fBunversion\fP. 
.br If such an entry does not exist, it is marked with an \fC'!'\fP -- because it has been manually marked, and for both types removing the entry makes no sense. .IP "\(bu" 2 A changed type (character device to symlink, file to directory etc.) is given as \fC'R'\fP (replaced), ie. as removed and newly added. .IP "\(bu" 2 -If the entry has been modified, the change is shown as \fC'C'\fP. + If the entry has been modified, the change is shown as \fC'C'\fP. .br - If the modification or status change timestamps (mtime, ctime) are changed, but the size is still the same, the entry is marked as possibly changed (a question mark \fC'\fP?' is printed). See \fBopt_checksum\fP. + If the modification or status change timestamps (mtime, ctime) are changed, but the size is still the same, the entry is marked as possibly changed (a question mark \fC'\fP?' is printed) - but see \fBchange detection\fP for details. .IP "\(bu" 2 -The meta-data flag \fC'm'\fP shows meta-data changes like properties, modification timestamp and/or the rights (owner, group, mode); depending on the \fB-v/-q\fP command line parameters, it may be splitted into \fC'P'\fP (properties), \fC't'\fP (time) and \fC'p'\fP (permissions). + The meta-data flag \fC'm'\fP shows meta-data changes like properties, modification timestamp and/or the rights (owner, group, mode); depending on the \fB-v/-q\fP command line parameters, it may be splitted into \fC'P'\fP (properties), \fC't'\fP (time) and \fC'p'\fP (permissions). .br If \fC'P'\fP is shown for the non-verbose case, it means \fBonly\fP property changes, ie. the entries filesystem meta-data is unchanged. .IP "\(bu" 2 @@ -788,8 +1038,9 @@ .IP "\(bu" 2 A \fC'x'\fP signifies a conflict. .PP +\fP .PP -Here's a table with the characters and their positions: +\fC Here's a table with the characters and their positions: .PP .nf @@ -803,71 +1054,92 @@ * .fi .PP +\fP .PP -Furthermore please take a look at \fBStatus output coloring\fP. +\fC Furthermore please take a look at \fBStatus output coloring\fP.\fP +.PP +\fC\fP .SH "sync-repos" .PP +\fC .PP .nf fsvs sync-repos [-r rev] [working copy base] + .fi .PP +\fP .PP -This command loads the file list from the repository. A following commit will send all differences and make the repository data identical to the local. +\fC This command loads the file list from the repository. A following commit will send all differences and make the repository data identical to the local.\fP .PP -This is normally not needed; the use cases are +\fC This is normally not needed; the use cases are .IP "\(bu" 2 debugging and .IP "\(bu" 2 -recovering from data loss in \fC$FSVS_WAA\fP (\fC/var/spool/fsvs\fP ). +recovering from data loss in \fB$FSVS_WAA\fP. .PP +\fP .PP -It is (currently) important if you want to backup two similar machines. Then you can commit one machine into a subdirectory of your repository, make a copy of that directory for another machine, and sync this other directory on the other machine. +\fC It is (currently) important if you want to backup two similar machines. Then you can commit one machine into a subdirectory of your repository, make a copy of that directory for another machine, and sync this other directory on the other machine.\fP .PP -A commit then will transfer only _changed_ files; so if the two machines share 2GB of binaries (\fC/usr\fP , \fC/bin\fP , \fC/lib\fP , ...) then these 2GB are still shared in the repository, although over time they will deviate (as both committing machines know nothing of the other path with identical files). 
+\fC A commit then will transfer only _changed_ files; so if the two machines share 2GB of binaries (\fC/usr\fP , \fC/bin\fP , \fC/lib\fP , ...) then these 2GB are still shared in the repository, although over time they will deviate (as both committing machines know nothing of the other path with identical files).\fP .PP -This kind of backup could be substituted by several levels of repository paths, which get 'overlayed' in a defined priority. So the base directory, which all machines derive from, will be committed from one machine, and it's no longer necessary for all machines to send identical files into the repository. +\fC This kind of backup could be substituted by several levels of repository paths, which get 'overlayed' in a defined priority. So the base directory, which all machines derive from, will be committed from one machine, and it's no longer necessary for all machines to send identical files into the repository.\fP .PP -The revision argument should only ever be used for debugging; if you fetch a filelist for a revision, and then commit against later revisions, problems are bound to occur. +\fC The revision argument should only ever be used for debugging; if you fetch a filelist for a revision, and then commit against later revisions, problems are bound to occur.\fP +.PP +\fC .PP \fBNote:\fP .RS 4 There's an issue in subversion, to collapse identical files in the repository into a single storage. That would ease the simple backup example, in that there's not so much storage needed over time; but the network transfers would still be much more than needed. .RE .PP +\fP .SH "update" .PP +\fC .PP .nf - ## invalid ## fsvs update [-r rev] [working copy base] + fsvs update [-r rev] [working copy base] fsvs update [-u url@rev ...] [working copy base] + .fi .PP +\fP .PP -This command does an update on all specified URLs for the current working copy, or, if none is given via \fB-u\fP, \fBall\fP URLs. +\fC This command does an update on all specified URLs for the current working copy, or, if none is given via \fB-u\fP, \fBall\fP URLs.\fP .PP -It first reads all changes in the repositories, overlays them (so that only the highest-priority entries are used), and fetches all necessary changes. +\fC It first reads all changes in the repositories, overlays them (so that only the highest-priority entries are used), and fetches all necessary changes.\fP +.PP +\fC\fP .SH "urls" .PP +\fC .PP .nf fsvs urls URL [URLs...] fsvs urls dump fsvs urls load + .fi .PP +\fP .PP -Initializes a working copy administrative area and connects \fCthe\fP current working directory to \fCREPOS_URL\fP. All commits and updates will be done to this directory and against the given URL. +\fC Initializes a working copy administrative area and connects \fCthe\fP current working directory to \fCREPOS_URL\fP. All commits and updates will be done to this directory and against the given URL.\fP .PP -Example: +\fC Example: .PP .nf fsvs urls http://svn/repos/installation/machine-1/trunk .fi .PP +\fP .PP -For a format definition of the URLs please see the chapter \fBFormat of URLs\fP. +\fC For a format definition of the URLs please see the chapter \fBFormat of URLs\fP.\fP +.PP +\fC .PP \fBNote:\fP .RS 4 @@ -880,10 +1152,11 @@ .PP .RE .PP +\fP .SS "Loading URLs" -You can load a list of URLs from \fCSTDIN\fP; use the \fCload\fP subcommand for that. 
+\fC You can load a list of URLs from \fCSTDIN\fP; use the \fCload\fP subcommand for that.\fP .PP -Example: +\fC Example: .PP .nf ( echo 'N:local,prio:10,http://svn/repos/install/machine-1/trunk' ; @@ -892,14 +1165,16 @@ .fi .PP +\fP +.PP +\fC Empty lines are ignored.\fP .PP -Empty lines are ignored. .SS "Dumping the defined URLs" -To see which URLs are in use for the current WC, you can use \fCdump\fP. +\fC To see which URLs are in use for the current WC, you can use \fCdump\fP.\fP .PP -As an optional parameter you can give a format statement; \fCp\fP , \fCn\fP , \fCr\fP, \fCt\fP and \fCu\fP are substituted by the priority, name, current revision, target revision and URL. Note: That's not a real \fCprintf()-format\fP; only these and a few \fC\\\fP sequences are recognized. +\fC As an optional parameter you can give a format statement; \fCp\fP , \fCn\fP , \fCr\fP, \fCt\fP and \fCu\fP are substituted by the priority, name, current revision, target revision and URL. Note: That's not a real \fCprintf()-format\fP; only these and a few \fC\\\fP sequences are recognized.\fP .PP -Example: +\fC Example: .PP .nf fsvs urls dump ' %u %n:%p\\n' @@ -908,7 +1183,8 @@ .fi .PP +\fP .PP -The default format is \fC'N:%n,P:%p,D:%t,%u\\\\n'\fP; for a more readable version you can use \fB-v\fP. +\fC The default format is \fC'N:%n,P:%p,D:%t,%u\\\\n'\fP; for a more readable version you can use \fB-v\fP. \fP .PP diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/doc/fsvs-howto-backup.5 /tmp/FWQOjxAJQ0/fsvs-1.1.17/doc/fsvs-howto-backup.5 --- fsvs-1.1.14/doc/fsvs-howto-backup.5 1970-01-01 01:00:00.000000000 +0100 +++ fsvs-1.1.17/doc/fsvs-howto-backup.5 2008-10-29 08:19:27.000000000 +0000 @@ -0,0 +1,196 @@ +.TH "FSVS - Backup HOWTO" 5 "29 Oct 2008" "Version trunk:2782" "fsvs" \" -*- nroff -*- +.ad l +.nh +.SH NAME +FSVS - Backup HOWTO +.SH "Preparation" +.PP +If you're going to back up your system, you have to decide what you want to have stored in your backup, and what should be left out. +.PP +Depending on your system usage and environment you first have to decide: +.PD 0 + +.IP "\(bu" 2 +Do you only want to backup your data in \fC/home\fP? +.PD 0 + +.IP " \(bu" 4 +Less storage requirements +.IP " \(bu" 4 +In case of hardware crash the OS must be set up again +.PP + +.IP "\(bu" 2 +Do you want to keep track of your configuration in \fCetc\fP? +.PD 0 + +.IP " \(bu" 4 +Very small storage overhead +.IP " \(bu" 4 +Not much use for backup/restore, but shows what has been changed +.PP + +.IP "\(bu" 2 +Or do you want to backup your whole installation, from \fC/\fP on? +.PD 0 + +.IP " \(bu" 4 +Whole system versioned, restore is only a few commands +.IP " \(bu" 4 +Much more storage space needed - typically you'd need at least a few GB free space. +.PP + +.PP +.PP +The next few moments should be spent thinking about the storage space for the repository - will it be on the system harddisk, a secondary or an external harddisk, or even off-site? +.PP +\fBNote:\fP +.RS 4 +If you just created a fresh repository, you probably should create the 'default' directory structure for subversion - \fCtrunk\fP, \fCbranches\fP, \fCtags\fP; this layout might be useful for your backups. +.br + The URL you'd use in fsvs would go to \fCtrunk\fP. +.RE +.PP +Possibly you'll have to take the available bandwidth into your considerations; a single home directory may be backed up on a 56k modem, but a complete system installation would likely need at least some kind of DSL or LAN. 
+.PP +\fBNote:\fP +.RS 4 +If this is a production box with sparse, small changes, you could take the initial backup on a local harddisk, transfer the directory with some media to the target machine, and switch the URLs. +.RE +.PP +A fair bit of time should go into a small investigation of which file patterns and paths you do \fBnot\fP want to back up. +.PD 0 + +.IP "\(bu" 2 +Backup files like \fC*\fP.bak, \fC*~\fP, \fC*\fP.tmp, and similar +.IP "\(bu" 2 +History files: \fC.sh-history\fP and similar in the home-directories +.IP "\(bu" 2 +Cache directories: your favourite browser might store many MB of cached data in your home-directories +.IP "\(bu" 2 +Virtual system directories, like \fC/proc\fP and \fC/sys\fP, \fC/dev/shmfs\fP. +.PP +.SH "Telling FSVS what to do" +.PP +Given \fC$WC\fP as the \fIworking directory\fP - the base of the data you'd like backed up (\fC/\fP, \fC/home\fP), and \fC$URL\fP as a valid subversion URL to your (already created) repository path. +.PP +Independent of all these details the first steps look like these: +.PP +.nf + cd $WC + fsvs urls $URL + +.fi +.PP + Now you have to say what should be ignored - that'll differ depending on your needs/wishes. +.PP +.nf + fsvs ignore './**~' './**.tmp' './**.bak' + fsvs ignore ./proc/ ./sys/ ./tmp/ + fsvs ignore ./var/tmp/ ./var/spool/lpd/ + fsvs ignore './var/log/*.gz' + fsvs ignore ./var/run/ /dev/pts/ + fsvs ignore './etc/*.dpkg-dist' './etc/*.dpkg-new' + fsvs ignore './etc/*.dpkg-old' './etc/*.dpkg-bak' + +.fi +.PP +.PP +\fBNote:\fP +.RS 4 +\fC/var/run\fP is for transient files; I've heard reports that \fBreverting\fP files there can cause problems with running programs. +.br + Similar for \fC/dev/pts\fP - if that's a \fCdevpts\fP filesystem, you'll run into problems on \fBupdate\fP or \fBrevert\fP - as FSVS won't be allowed to create entries in this directory. +.RE +.PP +Now you may find that you'd like to have some files encrypted in your backup - like \fC/etc/shadow\fP, or your \fC.ssh/id_*\fP files. So you tell fsvs to en/decrypt these files: +.PP +.nf + fsvs propset fsvs:commit-pipe 'gpg -er {your backup key}' /etc/shadow /etc/gshadow + fsvs propset fsvs:update-pipe 'gpg -d' /etc/shadow /etc/gshadow + +.fi +.PP +.PP +\fBNote:\fP +.RS 4 +These are just examples. You'll probably have to exclude some other paths and patterns from your backup, and mark some others as to-be-filtered. +.RE +.PP +.SH "The first backup" +.PP +.PP +.nf + fsvs commit -m 'First commit!' +.fi +.PP + That's all there is to it! +.SH "Further use and maintenance" +.PP +The further usage is more or less the \fCcommit\fP command from the last section. +.br + When do you have to do some manual work? +.PD 0 + +.IP "\(bu" 2 +When ignore patterns change. +.PD 0 + +.IP " \(bu" 4 +New filesystems that should be ignored, or would be ignored but shouldn't +.IP " \(bu" 4 +You find that your favorite word-processor leaves many *.segv files behind, and similar things +.PP + +.IP "\(bu" 2 +If you get an error message from fsvs, check the arguments and retry. In desperate cases (or just because it's quicker than debugging yourself) ask on \fCdev [at] fsvs.tigris.org\fP. +.PP +.SH "Restoration in a working system" +.PP +Depending on the circumstances you can take different ways to restore data from your repository. +.PD 0 + +.IP "\(bu" 2 +\fC 'fsvs export'\fP allows you to just dump some repository data into your filesystem - eg. into a temporary directory to sort things out.
+.IP "\(bu" 2 +Using \fC'fsvs revert'\fP you can get older revisions of a given file, directory or directory tree inplace. +.br + +.IP "\(bu" 2 +Or you can do a fresh checkout - set an URL in an (empty) directory, and update to the needed revision. +.IP "\(bu" 2 +If everything else fails (no backup media with fsvs on it), you can use subversion commands (eg. \fCexport\fP) to restore needed parts, and update the rest with fsvs. +.PP +.SH "Recovery for a non-booting system" +.PP +In case of a real emergency, when your harddisks crashed or your filesystem was eaten and you have to re-partition or re-format, you should get your system working again by +.PD 0 + +.IP "\(bu" 2 +booting from a knoppix or some other Live-CD (with \fCfsvs\fP on it), +.IP "\(bu" 2 +partition/format as needed, +.IP "\(bu" 2 +mount your harddisk partitions below eg. \fC/mnt\fP, +.IP "\(bu" 2 +and then recovering by +.PP +.PP +.nf + $ cd /mnt + $ export FSVS_CONF=/etc/fsvs # if non-standard + $ export FSVS_WAA=/var/spool/fsvs # if non-standard + $ fsvs checkout -o softroot=/mnt +.fi +.PP +.PP +If somebody asks really nice I'd possibly even create a \fCrecovery\fP command that deduces the \fCsoftroot\fP parameter from the current working directory. +.PP +For more information please take a look at \fBUsing an alternate root directory\fP. +.SH "Feedback" +.PP +If you've got any questions, ideas, wishes or other feedback, please tell us in the mailing list \fCusers [at] fsvs.tigris.org\fP. +.PP +Thank you! +.PP + diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/doc/fsvs-howto-master_local.5 /tmp/FWQOjxAJQ0/fsvs-1.1.17/doc/fsvs-howto-master_local.5 --- fsvs-1.1.14/doc/fsvs-howto-master_local.5 1970-01-01 01:00:00.000000000 +0100 +++ fsvs-1.1.17/doc/fsvs-howto-master_local.5 2008-10-29 08:19:27.000000000 +0000 @@ -0,0 +1,307 @@ +.TH "FSVS - Master/Local HOWTO" 5 "29 Oct 2008" "Version trunk:2782" "fsvs" \" -*- nroff -*- +.ad l +.nh +.SH NAME +HOWTO: Master/Local repositories \- Please read the \fBHOWTO: Backup\fP first, to know about basic steps using FSVS. +.SH "Rationale" +.PP +If you manage a lot of machines with similar or identical software, you might notice that it's a bit of work keeping them all up-to-date. Sure, automating distribution via rsync or similar is easy; but then you get identical machines, or you have to play with lots of exclude patterns to keep the needed differences. +.PP +Here another way is presented; and even if you don't want to use FSVS for distributing your files, the ideas presented here might help you keep your machines under control. +.SH "Preparation, repository layout" +.PP +In this document the basic assumption is that there is a group of (more or less identical) machines, that share most of their filesystems. +.PP +Some planning should be done beforehand; while the ideas presented here might suffice for simple versioning, your setup can require a bit of thinking ahead. +.PP +This example uses some distinct repositories, to achieve a bit more clarity; of course these can simply be different paths in a single repository (see \fBUsing a single repository\fP for an example configuration). 
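+.PP
+As a minimal sketch - the repository locations below are only placeholder examples, not something FSVS itself requires - such distinct repositories could be created beforehand with the standard subversion tools, using the usual \fCtrunk\fP/\fCtags\fP/\fCbranches\fP layout:
+.PP
+.nf
+  # assumed paths; adjust to your environment
+  svnadmin create /srv/svn/base
+  svnadmin create /srv/svn/machine1
+  # create the usual layout in each of them
+  svn mkdir -m 'layout' file:///srv/svn/base/trunk file:///srv/svn/base/tags file:///srv/svn/base/branches
+  svn mkdir -m 'layout' file:///srv/svn/machine1/trunk file:///srv/svn/machine1/tags file:///srv/svn/machine1/branches
+.fi
+.PP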
+.PP +Repository in URL \fCbase:\fP +.PP +.nf + trunk/ + bin/ + ls + true + lib/ + libc6.so + modules/ + sbin/ + mkfs + usr/ + local/ + bin/ + sbin/ + tags/ + branches/ + +.fi +.PP +.PP +Repository in URL \fCmachine1\fP (similar for machine2): +.PP +.nf + trunk/ + etc/ + HOSTNAME + adjtime + network/ + interfaces + passwd + resolv.conf + shadow + var/ + log/ + auth.log + messages + tags/ + branches/ + +.fi +.PP +.SS "User data versioning" +If you want to keep the user data versioned, too, a idea might be to start a new working copy in \fBevery\fP home directory; this way +.IP "\(bu" 2 +the system- and (several) user-commits can be run in parallel, +.IP "\(bu" 2 +the intermediate \fChome\fP directory in the repository is not needed, and +.IP "\(bu" 2 +you get a bit more isolation (against FSVS failures, out-of-space errors and similar). +.IP "\(bu" 2 +Furthermore FSVS can work with smaller file sets, which helps performance a bit (less dentries to cache at once, less memory used, etc.). +.PP +.PP +.PP +.nf + A/ + Andrew/ + .bashrc + .ssh/ + .kde/ + Alexander/ + .bashrc + .ssh/ + .kde/ + B/ + Bertram/ +.fi +.PP +.PP +A cronjob could simply loop over the directories in \fC/home\fP, and call fsvs for each one; giving a target URL name is not necessary if every home-directory is its own working copy. +.PP +\fBNote:\fP +.RS 4 +URL names can include a forward slash \fC/\fP in their name, so you might give the URLs names like \fChome/Andrew\fP - although that should not be needed, if every home directory is a distinct working copy. +.RE +.PP +.SH "Using master/local repositories" +.PP +Imagine having 10 similar machines with the same base-installation. +.PP +Then you install one machine, commit that into the repository as \fCbase/trunk\fP, and make a copy as \fCbase/released\fP. +.PP +The other machines get \fCbase/released\fP as checkout source, and another (overlaid) from eg. \fCmachine1/trunk\fP. +.br + Per-machine changes are always committed into the \fCmachineX/trunk\fP of the per-machine repository; this would be the host name, IP address, and similar things. +.PP +On the development machine all changes are stored into \fCbase/trunk\fP; if you're satisfied with your changes, you merge them (see \fBBranching, tagging, merging\fP) into \fCbase/released\fP, whereupon all other machines can update to this latest version. +.PP +So by looking at \fCmachine1/trunk\fP you can see the history of the machine-specific changes; and in \fCbase/released\fP you can check out every old version to verify problems and bugs. +.PP +\fBNote:\fP +.RS 4 +You can take this system a bit further: optional software packages could be stored in other subtrees. They should be of lower priority than the base tree, so that in case of conflicts the base should always be preferred (but see \fB1\fP). +.RE +.PP +Here is a small example; \fCmachine1\fP is the development machine, \fCmachine2\fP is a \fIclient\fP. 
+.PP +.nf + machine1$ fsvs urls name:local,P:200,svn+ssh://lserver/per-machine/machine1/trunk + machine1$ fsvs urls name:base,P:100,http://bserver/base-install1/trunk + # Determine differences, and commit them + machine1$ fsvs ci -o commit_to=local /etc/HOSTNAME /etc/network/interfaces /var/log + machine1$ fsvs ci -o commit_to=base / + +.fi +.PP +.PP +Now you've got a base-install in your repository, and can use that on the other machine: +.PP +.nf + machine2$ fsvs urls name:local,P:200,svn+ssh://lserver/per-machine/machine2/trunk + machine2$ fsvs urls name:base,P:100,http://bserver/base-install1/trunk + machine2$ fsvs sync-repos + # Now you see differences of this machines' installation against the other: + machine2$ fsvs st + # You can see what is different: + machine2$ fsvs diff /etc/X11/xorg.conf + # You can take the base installations files: + machine2$ fsvs revert /bin/ls + # And put the files specific to this machine into its repository: + machine2$ fsvs ci -o commit_to=local /etc/HOSTNAME /etc/network/interfaces /var/log + +.fi +.PP +.PP +Now, if this machine has a harddisk failure or needs setup for any other reason, you boot it (eg. via PXE, Knoppix or whatever), and do (\fB3\fP) +.PP +.nf + # Re-partition and create filesystems (if necessary) + machine2-knoppix$ fdisk ... + machine2-knoppix$ mkfs ... + # Mount everything below /mnt + machine2-knoppix$ mount /mnt/[...] + machine2-knoppix$ cd /mnt + # Do a checkout below /mnt + machine2-knoppix$ fsvs co -o softroot=/mnt + +.fi +.PP +.SH "Branching, tagging, merging" +.PP +Other names for your branches (instead of \fCtrunk\fP, \fCtags\fP and \fCbranches\fP) could be \fCunstable\fP, \fCtesting\fP, and \fCstable\fP; your production machines would use \fCstable\fP, your testing environment \fCtesting\fP, and in \fCunstable\fP you'd commit all your daily changes. +.PP +\fBNote:\fP +.RS 4 +Please note that there's no merging mechanism in FSVS; and as far as I'm concerned, there won't be. Subversion just gets automated merging mechanisms, and these should be fine for this usage too. (\fB4\fP) +.RE +.PP +.SS "Thoughts about tagging" +Tagging works just like normally; although you need to remember to tag more than a single branch. Maybe FSVS should get some knowledge about the subversion repository layout, so a \fCfsvs tag\fP would tag all repositories at once? It would have to check for duplicate tag-names (eg. on the \fCbase\fP -branch), and just keep it if it had the same copyfrom-source. +.PP +But how would tags be used? Define them as source URL, and checkout? Would be a possible case. +.PP +Or should \fCfsvs tag\fP do a \fImerge\fP into the repository, so that a single URL contains all files currently checked out, with copyfrom-pointers to the original locations? Would require using a single repository, as such pointers cannot be across different repositories. If the committed data includes the \fC$FSVS_CONF/\fP.../Urls file, the original layout would be known, too - although to use it a \fBsync-repos\fP would be necessary. +.SH "Using a single repository" +.PP +A single repository would have to be partitioned in the various branches that are needed for bookkeeping; see these examples. 
+.PP +Depending on the number of machines it might make sense to put them in a 1- or 2-level-deep hierarchy; named by the first character, like +.PP +.PP +.nf + machines/ + A/ + Axel/ + Andreas/ + B/ + Berta/ + G/ + Gandalf/ +.fi +.PP +.SS "Simple layout" +Here only the base system gets branched and tagged; the machines simply back up their specific/localized data into the repository. +.PP +.PP +.nf +# For the base-system: + trunk/ + bin/ + usr/ + sbin/ + tags/ + tag-1/ + branches/ + branch-1/ +# For the machines: + machines/ + machine1/ + etc/ + passwd + HOSTNAME + machine2/ + etc/ + passwd + HOSTNAME +.fi +.PP +.SS "Per-area" +Here every part gets its \fCtrunk\fP, \fCbranches\fP and \fCtags:\fP +.PP +.PP +.nf + base/ + trunk/ + bin/ + sbin/ + usr/ + tags/ + tag-1/ + branches/ + branch-1/ + machine1/ + trunk/ + etc/ + passwd + HOSTNAME + tags/ + tag-1/ + branches/ + machine2/ + trunk/ + etc/ + passwd + HOSTNAME + tags/ + branches/ +.fi +.PP +.SS "Common trunk, tags, and branches" +Here the base-paths \fCtrunk\fP, \fCtags\fP and \fCbranches\fP are shared: +.PP +.PP +.nf + trunk/ + base/ + bin/ + sbin/ + usr/ + machine2/ + etc/ + passwd + HOSTNAME + machine1/ + etc/ + passwd + HOSTNAME + tags/ + tag-1/ + branches/ + branch-1/ +.fi +.PP +.SH "Other notes" +.PP +.SS "1" +Conflicts should not be automatically merged. If two or more trees bring the same file, the file from the \fIhighest\fP tree wins - this way you always know the file data on your machines. It's better if a single piece of software doesn't work than to have a machine that no longer boots or is no longer accessible (eg. by SSH). +.PP +So keep your base installation at the highest priority, and you've got good chances that you won't lose control in case of conflicting files. +.SS "2" +If you don't know which files are different in your installs, +.IP "\(bu" 2 +install two machines, +.IP "\(bu" 2 +commit the first into fsvs, +.IP "\(bu" 2 +do a \fBsync-repos\fP on the second, +.IP "\(bu" 2 +and look at the \fBstatus\fP output. +.PP +.SS "3" +As Debian will include FSVS in the near future, it could be included on the next KNOPPIX, too! +.PP +Until then you'd need a custom boot CD, or copy the absolute minimum of files to the harddisk before recovery. +.PP +There's a utility \fCsvntar\fP available; it allows you to take a snapshot of a subversion repository directly into a \fC.tar\fP file, which you can easily export to the destination machine. (Yes, it knows about the meta-data properties FSVS uses, and stores them into the archive.) +.SS "4" +Why no file merging? Because all real differences are in the per-machine files -- the files that are in the \fCbase\fP repository are changed only on a single machine, and so there's a unidirectional flow. +.PP +BTW, how would you merge your binaries, eg. \fC/bin/ls\fP? +.SH "Feedback" +.PP +If you've got any questions, ideas, wishes or other feedback, please tell us in the mailing list \fCusers [at] fsvs.tigris.org\fP. +.PP +Thank you! +.PP + diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/doc/fsvs-options.5 /tmp/FWQOjxAJQ0/fsvs-1.1.17/doc/fsvs-options.5 --- fsvs-1.1.14/doc/fsvs-options.5 2008-04-02 06:24:33.000000000 +0100 +++ fsvs-1.1.17/doc/fsvs-options.5 2008-10-29 08:19:27.000000000 +0000 @@ -1,4 +1,4 @@ -.TH "Further options for FSVS."
1 "1 Apr 2008" "Version trunk:2078" "fsvs" \" -*- nroff -*- +.TH "FSVS - Options and configfile" 5 "29 Oct 2008" "Version trunk:2782" "fsvs" \" -*- nroff -*- .ad l .nh .SH NAME @@ -9,12 +9,22 @@ .PD 0 .IP "\(bu" 2 +\fCall_removed\fP - \fBTrimming the list of deleted entries\fP +.IP "\(bu" 2 +\fCauthor\fP - \fBAuthor\fP +.IP "\(bu" 2 +\fCchange_check\fP - \fBChange detection\fP +.IP "\(bu" 2 \fCcolordiff\fP - \fBUsing colordiff\fP .IP "\(bu" 2 \fCcommit_to\fP - \fBDestination URL for commit\fP .IP "\(bu" 2 \fCconflict\fP - \fBHow to resolve conflicts on update\fP .IP "\(bu" 2 +\fCconf\fP - \fBPath definitions for the config and WAA area\fP. +.IP "\(bu" 2 +\fCconfig_dir\fP - \fBConfiguration directory for the subversion libraries\fP. +.IP "\(bu" 2 \fCcopyfrom_exp\fP - \fBAvoiding expensive compares on \fBcopyfrom-detect\fP\fP .IP "\(bu" 2 \fCdebug_output\fP - \fBDestination for debug output\fP @@ -29,6 +39,8 @@ .IP "\(bu" 2 \fCfilter\fP - \fBFiltering entries\fP, but see \fB-f\fP. .IP "\(bu" 2 +\fClimit\fP - \fB'fsvs log' revision limit\fP +.IP "\(bu" 2 \fClog_output\fP - \fB'fsvs log' output format\fP .IP "\(bu" 2 \fCmerge_prg\fP, \fCmerge_opt\fP - \fBOptions regarding the 'merge' programm\fP @@ -39,7 +51,11 @@ .IP "\(bu" 2 \fCstat_color\fP - \fBStatus output coloring\fP .IP "\(bu" 2 +\fCstop_on_change\fP - \fBStopping status reports as soon as changes are found\fP +.IP "\(bu" 2 \fCwarning\fP - \fBSetting warning behaviour\fP, but see \fB-W\fP. +.IP "\(bu" 2 +\fCwaa\fP - \fBo_waa\fP. .PP .SH "Priorities for option setting" .PP @@ -99,12 +115,13 @@ .PP There are several possible settings, of which one can be chosen via the \fCpath\fP option. .PP +.PD 0 .IP "\(bu" 2 \fCwcroot\fP .br This is the old, traditional FSVS setting, where all paths are printed relative to the working copy root. .PP -.PP + .IP "\(bu" 2 \fCparameter\fP .br @@ -122,16 +139,18 @@ .IP "\(bu" 2 \fCabsolute\fP .br - All paths are printed in absolute form. This is useful if you want to paste them into other consoles without worrying whether the current directory matches. + All paths are printed in absolute form. This is useful if you want to paste them into other consoles without worrying whether the current directory matches. .PP .PP -The next two are nearly identical to \fCabsolute\fP, but the beginning of paths are substituted by environment variables. This makes sense if you want the advantage of full paths, but have some of them abbreviated. +The next two are nearly identical to \fCabsolute\fP, but the beginning of paths are substituted by environment variables. This makes sense if you want the advantage of full paths, but have some of them abbreviated. +.PD 0 + .IP "\(bu" 2 \fCenvironment\fP .br Match variables to directories after reading the known entries, and use this cached information. This is faster, but might miss the best case if new entries are found (which would not be checked against possible longer hits). .br - Furthermore, as this works via associating environment variables to entries, the environment variables must at least match the working copy base - shorter paths won't be substituted. + Furthermore, as this works via associating environment variables to entries, the environment variables must at least match the working copy base - shorter paths won't be substituted. .IP "\(bu" 2 \fCfull-environment\fP .br @@ -143,9 +162,12 @@ .RS 4 The string of the environment variables must match a directory name; the filename is always printed literally, and partial string matches are not allowed. Feedback wanted. 
.PP -Only environment variables whose names start with \fCWC\fP are used for substitution, to avoid using variables like \fC$PWD\fP, \fC$OLDPWD\fP, \fC$HOME\fP and similar which might differ between sessions. Maybe the allowed prefixes for the environment variables should be settable in the configuration. Opinions to the users mailing list, please. +Only environment variables whose names start with \fCWC\fP are used for substitution, to avoid using variables like \fC$PWD\fP, \fC$OLDPWD\fP, \fC$HOME\fP and similar which might differ between sessions. Maybe the allowed prefixes for the environment variables should be settable in the configuration. Opinions to the users mailing list, please. .RE .PP + +.PP +.PP Example, with \fC/\fP as working copy base: .PP .nf @@ -193,8 +215,6 @@ .fi .PP - -.PP .PP \fBNote:\fP .RS 4 @@ -206,12 +226,14 @@ The diff is not done internally in FSVS, but some other program is called, to get the highest flexibility. .PP There are several option values: +.PD 0 + .IP "\(bu" 2 -\fCdiff_prg\fP: The executable name, default \fC'diff'\fP. +\fCdiff_prg\fP: The executable name, default \fC'diff'\fP. .IP "\(bu" 2 -\fCdiff_opt\fP: The default options, default \fC'-pu'\fP. +\fCdiff_opt\fP: The default options, default \fC'-pu'\fP. .IP "\(bu" 2 -\fCdiff_extra\fP: Extra options, no default. +\fCdiff_extra\fP: Extra options, no default. .PP .PP The call is done as @@ -227,20 +249,23 @@ In \fCdiff_opt\fP you should use only use command line flags without parameters; in \fCdiff_extra\fP you can encode a single flag with parameter (like \fC'-U5'\fP). If you need more flexibility, write a shell script and pass its name as \fCdiff_prg\fP. .RE .PP +Very advanced users might be interested in \fBexported environment\fPvariables', too. .SH "Using colordiff" .PP If you have \fCcolordiff\fP installed on your system, you might be interested in the \fCcolordiff\fP option. .PP It can take on of these values: +.PD 0 + .IP "\(bu" 2 -\fCno\fP, \fCoff\fP or \fCfalse:\fP Don't use \fCcolordiff\fP. +\fCno\fP, \fCoff\fP or \fCfalse:\fP Don't use \fCcolordiff\fP. .IP "\(bu" 2 -\fCyes\fP, \fCtrue\fP or \fCon:\fP If this option is set on the commandline, or the output is a tty, pipe the output of the \fCdiff\fP program (see \fBOptions relating to the 'diff' action\fP) to \fCcolordiff\fP. +empty (default value): Try to use \fCcolordiff\fP as executable, but don't throw an error if it can't be started; just pipe the data as-is to \fCSTDOUT\fP. .IP "\(bu" 2 -\fCauto:\fP Like yes, but don't throw an error if colordiff can't be started; just pipe the data as-is to \fCSTDOUT\fP. +anything else: Pipe the output of the \fCdiff\fP program (see \fBOptions relating to the 'diff' action\fP) to the given executable. .PP .PP -The default value is \fCauto\fP. +Please note that if \fCSTDOUT\fP is not a tty (eg. is redirected into a file), this option must be given on the command line to take effect. .SH "Filtering entries" .PP Please see the command line parameter for \fB-f\fP, which is identical. @@ -250,6 +275,40 @@ fsvs -o filter=mtime .fi .PP +.SH "Trimming the list of deleted entries" +.PP +If you remove a directory, all entries below are implicitly known to be deleted, too. To make the \fBstatus\fP output shorter there's the \fCall_removed\fP option; which, if set to \fCyes\fP, will cause children of removed entries to be omitted. +.PP +Example for the config file: +.PP +.nf + all_removed=yes + +.fi +.PP +.SH "Change detection" +.PP +This options allows to specify the trade-off between speed and accuracy. 
+.PP +A file with a changed size can immediately be known as changed; but if only the modification time is changed, this is not so easy. By default FSVS does an MD5 check on the file in this case; if you don't want that, or if you want to do the checksum calculation for \fBevery\fP file (in case a file has changed, but its mtime not), you can use this option to change FSVS' behaviour. +.PP +On the command line there's a shortcut for that: for every \fC'-C'\fP another check in this option is chosen. +.PP +The recognized specifications are: +.PD 0 + +.IP "\(bu" 2 +\fCnone\fP - Resets the check bitmask to 'no checks'. +.IP "\(bu" 2 +\fCfile_mtime\fP - Check files for modifications via MD5 if the mtime is different (default). +.IP "\(bu" 2 +\fCdir\fP - Check all directories for new entries; this happens normally if a directory has changed. +.IP "\(bu" 2 +\fCallfiles\fP - Check \fBall\fP files with MD5 for changes (\fCtripwire\fP-like operation). +.IP "\(bu" 2 +\fCfull\fP - All available checks. +.PP +You can give multiple options; they're accumulated unless overridden by \fCnone\fP. +.PP +.nf + fsvs -o change_check=allfiles status + +.fi +.PP +.PP +\fBNote:\fP +.RS 4 +\fIcommit\fP and \fIupdate\fP additionally set the \fCdir\fP option, to avoid missing new files. +.RE +.PP .SH "Setting warning behaviour" .PP Please see the command line parameter \fB-W\fP, which is identical. @@ -261,7 +320,7 @@ .PP .SH "Using an alternate root directory" .PP -This is a path that is prepended to \fC$FSVS_WAA\fP and \fC$FSVS_CONF\fP (or their default values, see \fBenvs\fP), if they do not already start with it, and it is cut off for the directory-name MD5 calculation. +This is a path that is prepended to \fC$FSVS_WAA\fP and \fC$FSVS_CONF\fP (or their default values, see \fBFiles used by fsvs\fP), if they do not already start with it, and it is cut off for the directory-name MD5 calculation. .PP When is that needed? Imagine that you've booted from some Live-CD like Knoppix; if you want to setup or restore a non-working system, you'd have to transfer all files needed by the \fCfsvs\fP binary to it, and then start in some kind of \fCchroot\fP environment. .PP @@ -270,10 +329,16 @@ This is used for recovery; see the example in \fBRecovery for a non-booting system\fP. .PP So how does this work? +.PD 0 + .IP "\(bu" 2 -The internal data paths derived from \fC$FSVS_WAA\fP and \fC$FSVS_CONF\fP use the value given for \fCsoftroot\fP as a base directory, if they do not already start with it. (If that creates a conflict for you, eg. in that you want to use \fC/var\fP as the \fCsoftroot\fP, and your \fC$FSVS_WAA\fP should be \fC/var/fsvs\fP, you can make the string comparison fail by using \fC/./var\fP for either path.) +The internal data paths derived from \fC$FSVS_WAA\fP and \fC$FSVS_CONF\fP use the value given for \fCsoftroot\fP as a base directory, if they do not already start with it. +.br + (If that creates a conflict for you, eg. in that you want to use \fC/var\fP as the \fCsoftroot\fP, and your \fC$FSVS_WAA\fP should be \fC/var/fsvs\fP, you can make the string comparison fail by using \fC/./var\fP for either path.) +.PP + .IP "\(bu" 2 -When a directory name for \fC$FSVS_CONF\fP or \fC$FSVS_WAA\fP is derived from some file path, the part matching \fCsoftroot\fP is cut off, so that the generated names match the situation after rebooting. +When a directory name for \fC$FSVS_CONF\fP or \fC$FSVS_WAA\fP is derived from some file path, the part matching \fCsoftroot\fP is cut off, so that the generated names match the situation after rebooting.
.PP .PP Previously you'd have to \fBexport\fP your data back to the filesystem and call \fBurls\fP \fC'fsvs urls'\fP and \fCfsvs\fP \fBsync-repos\fP again, to get the WAA data back. @@ -283,22 +348,40 @@ A plain \fCchroot()\fP would not work, as some needed programs (eg. the decoder for update, see \fBSpecial property names\fP) would not be available. .PP The easy way to understand \fCsoftroot\fP is: If you want to do a \fCchroot()\fP into the given directory (or boot with it as \fC/\fP), you'll want this set. +.PP +As this value is used for finding the correct working copy root (by trying to find a \fBconf-path\fP), it cannot be set from a per-wc config file. Only the environment, global configuration or command line parameter make sense. .RE .PP +.SH "'fsvs log' revision limit" +.PP +There are some defaults for the number of revisions that are shown on a \fC'fsvs log'\fP command: +.PD 0 + +.IP "\(bu" 2 +2 revisions given (\fC-rX:Y\fP): \fCabs\fP(X-Y)+1, ie. all revisions in that range. +.IP "\(bu" 2 +1 revision given: exactly that one. +.IP "\(bu" 2 +no revisions given: from \fCHEAD\fP to 1, with a maximum of 100. +.PP +.PP +So this option is mostly useful to get more than the default number of revisions when running without revision arguments, or to get fewer. .SH "'fsvs log' output format" .PP You can modify aspects of the \fBfsvs log\fP output format by setting the \fClog_output\fP option to a combination of these flags: +.PD 0 + .IP "\(bu" 2 \fCcolor:\fP This uses color in the output, similar to \fCcg-log\fP (cogito-log); the header and separator lines are highlighted. .PP \fBNote:\fP .RS 4 -This uses ANSI escape sequences, and tries to restore the default color; if you know how to do that better (and more compatible), please tell the developer mailing list. +This uses ANSI escape sequences, and tries to restore the default color; if you know how to do that better (and more compatible), please tell the developer mailing list. .RE .PP .IP "\(bu" 2 -\fCindent:\fP Additionally you can shift the log message itself a space to the right, to make the borders clearer. +\fCindent:\fP Additionally you can shift the log message itself a space to the right, to make the borders clearer. .PP .PP Furthermore the value \fCnormal\fP is available; this turns off all special handling. @@ -321,7 +404,34 @@ .PP \fBNote:\fP .RS 4 -If \fCfsvs\fP aborts with an error during \fBstatus\fP output, you might want to turn this option off again (eg. on the commandline with \fC-odir_sort=no\fP) to see where \fCfsvs\fP stops. +If \fCFSVS\fP aborts with an error during \fBstatus\fP output, you might want to turn this option off again, to see where \fCfsvs\fP stops; the easiest way is on the commandline with \fC-odir_sort=no\fP. .RE .PP +.SH "Author" +.PP +You can specify an author to be used on commit. This option has a special behaviour; if the first character of the value is a \fC'$'\fP, the value is replaced by the named environment variable. +.PP +Empty strings are ignored; that allows a \fC/etc/fsvs/config\fP like this: +.PP +.nf + author=unknown + author=$LOGNAME + author=$SUDO_USER + +.fi
+.PP + where the last non-empty value is taken; and if your \fC.authorized_keys\fP has lines like +.PP +.nf + environment='FSVS_AUTHOR=some_user' ssh-rsa ... + +.fi +.PP + that would override the config values. +.PP +\fBNote:\fP +.RS 4 +Your \fCsshd_config\fP needs the \fCPermitUserEnvironment\fP setting; you can also take a look at the \fCAcceptEnv\fP and \fCSendEnv\fP documentation.
.RE .PP .SH "Destination URL for commit" @@ -394,6 +504,18 @@ .fi .PP +.SH "Stopping status reports as soon as changes are found" +.PP +If you want to use \fCFSVS\fP in scripts, you might simply want to know whether anything was changed. +.PP +For this use the \fCstop_on_change\fP option, possibly combined with \fBFiltering entries\fP : +.PP +.nf + fsvs -o stop_change=yes st /etc + fsvs -o stop_change=yes -o filter=text status /etc/init.d + +.fi +.PP .SH "How to resolve conflicts on update" .PP If you start an update, but one of the entries that was changed in the repository is changed locally too, you get a conflict. @@ -440,10 +562,12 @@ Like with \fBdiff\fP, the \fCmerge\fP operation is not done internally in FSVS. .PP To have better control +.PD 0 + .IP "\(bu" 2 -\fCmerge_prg\fP: The executable name, default \fC'merge'\fP. +\fCmerge_prg\fP: The executable name, default \fC'merge'\fP. .IP "\(bu" 2 -\fCmerge_opt\fP: The default options, default \fC'-A'\fP. +\fCmerge_opt\fP: The default options, default \fC'-A'\fP. .PP .PP The option \fC'-p'\fP is always used: @@ -453,6 +577,35 @@ .fi .PP +.SH "Path definitions for the config and WAA area" +.PP +.PP +The paths given here are used to store the persistent configuration data needed by FSVS; please see \fBFiles used by fsvs\fP and \fBPriorities for option setting\fP for more details, and the \fBUsing an alternate root directory\fP parameter as well as the \fBRecovery for a non-booting system\fP for further discussion. +.PP +.PP +.nf + FSVS_CONF=/home/user/.fsvs-conf fsvs -o waa=/home/user/.fsvs-waa st +.fi +.PP +.PP +\fBNote:\fP +.RS 4 +Please note that these paths can be given \fBonly\fP as environment variables (\fC$FSVS_CONF\fP resp. \fC$FSVS_WAA\fP) or as command line parameter; settings in config files are ignored. +.RE +.PP +.SH "Configuration directory for the subversion libraries" +.PP +This path specifies where the subversion libraries should take their configuration data from; the most important aspect of that is authentication data, especially for certificate authentication. +.PP +The default value is \fC$FSVS_CONF/auth/\fP. +.PP +\fC/etc/fsvs/config\fP could have eg. +.PP +.nf + config_dir=/root/.subversion + +.fi +.PP .PP diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/doc/fsvs-url-format.5 /tmp/FWQOjxAJQ0/fsvs-1.1.17/doc/fsvs-url-format.5 --- fsvs-1.1.14/doc/fsvs-url-format.5 1970-01-01 01:00:00.000000000 +0100 +++ fsvs-1.1.17/doc/fsvs-url-format.5 2008-10-29 08:19:27.000000000 +0000 @@ -0,0 +1,82 @@ +.TH "FSVS - URL format" 5 "29 Oct 2008" "Version trunk:2782" "fsvs" \" -*- nroff -*- +.ad l +.nh +.SH NAME +Format of URLs \- The given URLs are 'overlayed' according to their priority, and they get a name (to ease updating only parts). +.PP +Such an \fIextended URL\fP has the form +.PP +.nf + ['name:'{name},]['target:'{t-rev},]['prio:'{prio},]URL + +.fi +.PP + where URL is a standard URL known by subversion -- something like \fChttp://....\fP, \fCsvn://...\fP or \fCsvn+ssh://...\fP. +.PP +The arguments before the URL are optional and can be in any order; the URL must be last. +.PP +Example: +.PP +.nf + name:perl,prio:5,svn://... + +.fi +.PP + or, using abbreviations, +.PP +.nf + N:perl,P:5,T:324,svn://... + +.fi +.PP +.PP +Please mind that the full syntax is in lower case, whereas the abbreviations are capitalized! +.br + Internally the \fC\fP: is looked for, and if the part before this character is a known keyword, it is used. +.br + As soon as we find an unknown keyword we treat it as an URL, ie. stop processing. 
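+.PP
+For illustration only - the repository address below is just a placeholder - such an extended URL is simply passed to the \fBurls\fP command, and the stored result can be checked afterwards with \fCdump:\fP
+.PP
+.nf
+  fsvs urls name:base,prio:100,http://svn.example.com/repos/base/trunk
+  fsvs urls dump
+.fi
+.PP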
+.PP +The priority is in reverse numeric order - the lower the number, the higher the priority. (See \fC\fBurl__current_has_precedence()\fP\fP ) +.SH "Why a priority?" +.PP +When we have to overlay several URLs, we have to know \fBwhich\fP URL takes precedence - in case the same entry is in more than one. \fB(Which is \fBnot\fP recommended!)\fP +.SH "Why a name?" +.PP +We need a name, so that the user can say 'commit all outstanding changes to the repository at URL x', without having to remember the full URL. After all, this URL should already be known, as there's a list of URLs to update from. +.PP +You should only use alphanumeric characters and the underscore here; or, in other words, \fC\\w\fP or \fC\fP[a-zA-Z0-9_]. (Whitespace, comma and semicolon get used as separators.) +.SH "What can I do with the target revision?" +.PP +Using the target revision you can tell fsvs that it should use the given revision number as destination revision - so update would go there, but not further. Please note that the given revision number overrides the \fC-r\fP parameter; this sets the destination for all URLs. +.PP +The default target is \fCHEAD\fP. +.PP +\fBNote:\fP +.RS 4 +In subversion you can enter \fCURL@revision\fP - this syntax may be implemented in fsvs too. (But it has the problem, that as soon as you have a \fC@\fP in the URL, you \fBmust\fP give the target revision everytime!) +.RE +.PP +.SH "There's an additional internal number - why that?" +.PP +This internal number is not for use by the user. It is just used to have an unique identifier for an URL, without using the full string. +.PP +\fBNote:\fP +.RS 4 +On my system the package names are on average 12.3 characters long (1024 packages with 12629 bytes, including newline): +.PP +.nf + COLUMNS=200 dpkg-query -l | cut -c5- | cut -f1 -d' ' | wc + +.fi +.PP +.RE +.PP +So if we store an \fIid\fP of the url instead of the name, we have approx. 4 bytes per entry (length of strings of numbers from 1 to 1024). Whereas we'd now use 12.3 characters, that's a difference of 8.3 per entry. +.PP +Multiplied with 150 000 entries we get about 1MB difference in filesize of the dir-file. Not really small ... +.PP +Currently we use about 92 bytes per entry. So we'd (unnecessarily) increase the size by about 10%. +.PP +That's why there's an \fBurl_t::internal_number\fP. +.PP + diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/doc/IGNORING /tmp/FWQOjxAJQ0/fsvs-1.1.17/doc/IGNORING --- fsvs-1.1.14/doc/IGNORING 2008-02-22 19:12:41.000000000 +0000 +++ fsvs-1.1.17/doc/IGNORING 2008-10-29 08:19:27.000000000 +0000 @@ -46,6 +46,10 @@ you want to exclude a directories' files, but not the directory itself, use something like ./dir/* or ./dir/** + If you're deep within your working copy and you'd like to ignore some + files with a WC-relative ignore pattern, you might like to use the rign + command. + Absolute shell patterns There's another way to specify shell patterns - using absolute paths. @@ -146,15 +150,56 @@ Modifiers - All of these patterns can have one or more of these modifiers *before* - them; not all combinations make sense. - - Modifier Meaning - i Ignore case for matching - t A negative ignore pattern, ie. a take pattern. + All of these patterns can have one or more of these modifiers before + them, with (currently) optional "," as separators; not all combinations + make sense. + + Modifier Meaning + i Ignore case for matching + t A negative ignore pattern, ie. a take pattern. + d Match directories only. 
This is useful if you have a directory tree + in which only certain files should be taken; see below. + m:specification Mode matching; this expects a specification of two + octal values in the form m:and_value:compare_value, like m:04:00; the + following examples give only the numbers. + As an example: the file has mode 0750; a specification of + * 0700:0700 matches, and + * 0007:0007 doesn't match. + + A real-world example: 0007:0000 would match all entries that have no + right bits set for "others", and could be used to exclude private files + (like /etc/shadow). (Alternatively, the others-read bit could be used: + 0004:0000. + FSVS will give an error for invalid specifications, ie. ones that can + never match; an example would be 0700:0007. + + For patterns with the m (mode match) and d (dironly) modifiers the + filename pattern gets optional; so you don't have to give an all-match + wildcard pattern (./**) for these cases. t./proc/stat ./proc/ Such declaration would store only /proc/stat , and nothing else of /proc . + + t,d,./var/vmail/** + t./var/vmail/**/.*.sieve + ./var/vmail/** + + This would take all ".*.sieve" files (or directories) below /var/vmail, + in all depths, and all directories there; but no other files. + + If your files are at a certain depth, and you don't want all other + directories taken, too, you can specify that exactly: + td./var/vmail/*§ + td./var/vmail/*/* + t./var/vmail/*/*/.*.sieve + ./var/vmail/** + + m:04:0 + t,./etc/ + ./** + + This would take all files from /etc, but ignoring the files that are + not world-readable (other-read bit cleared). diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/doc/notice.txt /tmp/FWQOjxAJQ0/fsvs-1.1.17/doc/notice.txt --- fsvs-1.1.14/doc/notice.txt 1970-01-01 01:00:00.000000000 +0100 +++ fsvs-1.1.17/doc/notice.txt 2008-10-25 12:15:25.000000000 +0100 @@ -0,0 +1,6 @@ +Many of the files in this directory are autogenerated from the comments in +the source files. +It might be better to change them; but I'll accept documentation patches, +too. (I just have to put the changes back into the source files). + +If you want to help, just ask on the dev@ mailing list. diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/doc/USAGE /tmp/FWQOjxAJQ0/fsvs-1.1.17/doc/USAGE --- fsvs-1.1.14/doc/USAGE 2008-04-02 06:24:33.000000000 +0100 +++ fsvs-1.1.17/doc/USAGE 2008-10-29 08:19:27.000000000 +0000 @@ -28,7 +28,7 @@ Defining which entries to take: - ignore + ignore and rign Define ignore patterns unversion @@ -38,7 +38,7 @@ Add entries that would be ignored cp, mv - Tell fsvs that entries were copied. + Tell fsvs that entries were copied Commands working with the repository: @@ -52,13 +52,16 @@ Fetch some part of the repository, and register it as working copy - revert - Undo local changes + cat + Get a file from the directory + + revert and uncp + Undo local changes and entry markings remote-status Ask what an update would bring -Property handling +Property handling: prop-set Set user-defined properties @@ -82,6 +85,10 @@ Multi-url-operations are relatively new; there might be rough edges. + The return code is 0 for success, or 2 for an error. 1 is returned if + the option Stopping status reports as soon as changes are found is + used, and changes are found; see also Filtering entries. + Universal options -V -- show version @@ -130,24 +137,8 @@ -C -- checksum - -C increments the checksum flag. Normally status tells that a file has - possible modification, if its mtime has changed but its size not. 
Using - -C you can tell the commands to be extra careful and always check for - modifications. - - The values are - 0 Normal operations - 1 Check files for modifications if possibly changed - 2 Do an MD5 verification for all files, and check all directories for - new entries. - - If a files size has changed, we can be sure that it's changed; a - directory is checked for changes if any of its meta-data has changed - (mtime, ctime, owner, group, size, mode). - - Note: - commit and update set the checksum flag to at least 1, to avoid - missing changed files. + -C chooses to use more change detection checks; please see the + change_check option for more details. -f -- filter entries @@ -155,15 +146,15 @@ operations, modification of the work done on given entries. It requires a specification at the end, which can be any combination of - any, text, new, deleted, meta, mtime, group or owner. + any, text, new, deleted (or removed), meta, mtime, group, mode, changed + or owner. By giving eg. the value text, with a status action only entries that are new or changed are shown; with mtime,group only entries whose group or modification time has changed are printed. Note: - The list does not include possibly changed entries; see -C -- - checksum -C. + Please see Change detection for some more information. If an entry gets replaced with an entry of a different type (eg. a directory gets replaced by a file), that counts as deleted and new. @@ -242,14 +233,14 @@ Also an environment variable FSVS_WARNINGS is used and parsed; it is simply a whitespace-separated list of option specifications. --u URLname[@revision] -- select URLs +-u URLname[@revision[:revision]] -- select URLs - Some commands' operations can be reduced to a subset of defined URLs; - the update command is the best example. + Some commands can be reduced to a subset of defined URLs; the update + command is an example. - If you have more than a single URL in use for your working copy, and - update updates all entries from all URLs. By using this parameter you - can tell FSVS to update only a single URL. + If you have more than a single URL in use for your working copy, update + normally updates all entries from all URLs. By using this parameter you + can tell FSVS to update only the specified URLs. The parameter can be used repeatedly; the value can have multiple URLs, separated by whitespace or one of ",;". @@ -259,6 +250,10 @@ This would get HEAD of base_install and gcc, and set the target revision of the boot URL at 32. + Note: + The second revision specification will be used for eg. the diff + command; but this is not yet implemented. + -o [name[=value]] -- other options This is used for setting some seldom used option, for which default can @@ -342,6 +337,34 @@ revision of the entries is unknown, we can only use 0 - and loose information this way! +delay + + This command delays execution until the time has passed at least to the + next second after writing the dir and urls files. So, where previously + the delay option was used, this can be substituted by the given command + followed by the delay command. + + The advantage over the Waiting for a time change after working copy + operations option is that read-only commands can be used in the + meantime. + + An example: + fsvs commit /etc/X11 -m "Backup of X11" + ... read-only commands, like "status" + fsvs delay /etc/X11 + ...
read-write commands, like "commit" + + In the testing framework it is used to save a bit of time; in normal + operation, where fsvs commands are not so tightly packed, it is + normally preferable to use the delay option. + +cat + + fsvs cat [-r rev] path + + Fetches a file with the specified revision or, if not given, BASE, from + the repository, and outputs it to STDOUT. + checkout fsvs checkout [path] URL [URLs...] @@ -391,10 +414,6 @@ If you're currently in /etc , you can even drop the /etc/ in front, and just use the filenames. - This extended path handling on the commandline is not yet available for - every command. Most of them still expect you to be in the working copy - root. - Please see status for explanations on -v and -C . For advanced backup usage see also FSVS_PROP_COMMIT_PIPE. @@ -404,9 +423,9 @@ fsvs cp dump fsvs cp load - This command marks DEST as a copy of SRC at revision rev, so that on - the next commit of DEST the corresponding source path is sent as copy - source. + The copy command marks DEST as a copy of SRC at revision rev, so that + on the next commit of DEST the corresponding source path is sent as + copy source. The default value for rev is BASE, ie. the revision the SRC (locally) is at. @@ -542,6 +561,27 @@ If too many possible matches are found, not all may be printed; only the indicator ... is shown at the end. +uncp + + fsvs uncopy DEST [DEST ...] + + The uncopy command removes a copyfrom mark from the destination entry. + This will make the entry unknown again, and reported as New on the next + invocations. + + Only the base of a copy can be un-copied; if a directory structure was + copied, and the given entry is just implicitly copied, this command + will give you an error. + + This is not folded in revert, because it's not clear whether revert + should restore the original copyfrom data or remove the copy attribute; + by using a special command this is no longer ambiguous. + + Example: + $ fsvs copy SourceFile DestFile + # Whoops, was wrong! + $ fsvs uncopy DestFile + diff fsvs diff [-v] [-r rev[:rev2]] [-R] PATH [PATH...] @@ -570,6 +610,12 @@ Please see also Options relating to the "diff" action and Using colordiff. + Todo: + Two revisions diff is buggy in that it (currently) always + fetches the full tree from the repository; this is not only a + performance degradation, but you'll see more changed entries + than you want. This will be fixed. + export fsvs export REPOS_URL [-r rev] @@ -592,8 +638,8 @@ ignore - fsvs ignore [prepend|append|at=n] pattern[s] fsvs ignore dump|load + fsvs ignore [prepend|append|at=n] pattern [pattern ...] This command adds patterns to the end of the ignore list, or, with prepend , puts them at the beginning of the list. With at=x the @@ -643,6 +689,36 @@ Please take care that your wildcard patterns are not expanded by the shell! +rign + + fsvs rel-ignore [prepend|append|at=n] path-spec [path-spec ...] + fsvs ri [prepend|append|at=n] path-spec [path-spec ...] + + If you use more than a single working copy for the same data, it will + be stored in different paths - and that makes absolute ignore patterns + infeasible. But relative ignore patterns are anchored at the beginning + of the WC root - which is a bit tiring if you're deep in your WC + hierarchy and want to ignore some files. + + To make that easier you can use the rel-ignore (abbreviated as ri) + command; this converts all given path-specifications (that may include + wildcards as per the shell pattern specification above) to WC-relative + values before storing them. 
+ + Example for /etc as working copy root: + fsvs rel-ignore '/etc/X11/xorg.conf.*' + + cd /etc/X11 + fsvs rel-ignore 'xorg.conf.*' + + Both commands would store the pattern "./X11/xorg.conf.*". + + Note: + This works only for shell patterns. + + For more details about ignoring files please see the ignore command and + Using ignore patterns. + info fsvs info [-R [-R]] [PATH...] @@ -683,13 +759,12 @@ The optional rev1 and rev2 can be used to restrict the revisions that are shown; if no values are given, the logs are given starting from - HEAD downwards. + HEAD downwards, and then a limit on the number of revisions is applied + (but see the limit option). If you use the -v -option, you get the files changed in each revision printed, too. - Currently at most 100 log messages are shown. - There is an option controlling the output format; see "fsvs log" output format. @@ -698,7 +773,6 @@ * Show revision for all URLs associated with a working copy? In which order? * A URL-parameter, to specify the log URL. (Name) - * Limit number of revisions shown? prop-get @@ -730,7 +804,7 @@ This command removes property value for the given path(s). - See also prop-set + See also prop-set. prop-list @@ -775,8 +849,8 @@ the copy source data. * An unmodified direct copy destination entry, and other uncommitted entries with special flags (manually added, or defined as copied), - are changed back to "N"ew -- the copy definition and the - special status is removed. + are changed back to "N"ew -- the copy definition and the special + status is removed. Please note that on implicitly copied entries (entries that are marked as copied because some parent directory is the base of a copy) cannot be un-copied; they can only be reverted to their @@ -841,14 +915,8 @@ Working with copied entries If an entry is marked as copied from another entry (and not - committed!), a revert will undo the copy setting - which will make the - entry unknown again, and reported as new on the next invocations. - - If a directory structure was copied, and the current entry is just a - implicitly copied entry, revert would take the copy source as - reference, and get the file data from there. - - Summary: Only the base of a copy can be un-copied. + committed!), a revert will fetch the original copyfrom source. To undo + the copy setting use the uncp command. status @@ -883,8 +951,8 @@ * If the entry has been modified, the change is shown as 'C'. If the modification or status change timestamps (mtime, ctime) are changed, but the size is still the same, the entry is marked as - possibly changed (a question mark '?' is printed). See - opt_checksum. + possibly changed (a question mark '?' is printed) - but see change + detection for details. * The meta-data flag 'm' shows meta-data changes like properties, modification timestamp and/or the rights (owner, group, mode); depending on the -v/-q command line parameters, it may be splitted @@ -917,7 +985,7 @@ This is normally not needed; the use cases are * debugging and - * recovering from data loss in $FSVS_WAA (/var/spool/fsvs ). + * recovering from data loss in $FSVS_WAA. It is (currently) important if you want to backup two similar machines. Then you can commit one machine into a subdirectory of your repository, @@ -949,7 +1017,7 @@ update - ## invalid ## fsvs update [-r rev] [working copy base] + fsvs update [-r rev] [working copy base] fsvs update [-u url@rev ...] 
[working copy base] This command does an update on all specified URLs for the current diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/example/etc/apt/apt.conf.d/50fsvs-system-versioning /tmp/FWQOjxAJQ0/fsvs-1.1.17/example/etc/apt/apt.conf.d/50fsvs-system-versioning --- fsvs-1.1.14/example/etc/apt/apt.conf.d/50fsvs-system-versioning 2008-03-05 16:26:47.000000000 +0000 +++ fsvs-1.1.17/example/etc/apt/apt.conf.d/50fsvs-system-versioning 2008-04-18 07:29:13.000000000 +0100 @@ -1,2 +1,2 @@ DPkg::Post-Invoke ""; -DPkg::Post-Invoke:: "/var/lib/fsvs-versioning/scripts/commit.sh" +DPkg::Post-Invoke:: "/var/lib/fsvs-versioning/scripts/commit.sh"; diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/example/setup.sh /tmp/FWQOjxAJQ0/fsvs-1.1.17/example/setup.sh --- fsvs-1.1.14/example/setup.sh 2008-03-05 16:28:11.000000000 +0000 +++ fsvs-1.1.17/example/setup.sh 2008-10-29 07:19:20.000000000 +0000 @@ -12,7 +12,7 @@ # Ignore if group already exists -addgroup fsvs || true +addgroup $group || true if fsvs info > /dev/null 2>&1 then @@ -41,28 +41,32 @@ # Create local filelist, to make "fsvs ps" work. fsvs checkout file://$location/trunk/etc - fsvs ignore './**.dpkg-old' './**.bak' './**.old' './**~' + fsvs ignore '/etc/**.dpkg-old' '/etc/**.dpkg-new' '/etc/**.dpkg-dist' '/etc/**.dpkg-bak' + fsvs ignore '/etc/**.bak' '/etc/**.old' '/etc/**~' '/**.swp' # easy to remake, no big deal (?) - fsvs ignore './ssh/ssh_host_*key' + fsvs ignore '/etc/ssh/ssh_host_*key' # Not used? - fsvs ignore ./apt/secring.gpg + fsvs ignore /etc/apt/secring.gpg - fsvs ignore ./mtab - fsvs ignore ./ld.so.cache ./adjtime + fsvs ignore /etc/mtab + fsvs ignore /etc/ld.so.cache /etc/adjtime # Just compiled data? - fsvs ignore './selinux/*.pp' + fsvs ignore '/etc/selinux/*.pp' # unknown whether that should be backuped. - fsvs ignore './identd.key' - fsvs ignore './ppp/*-secrets' + fsvs ignore '/etc/identd.key' + fsvs ignore '/etc/ppp/*-secrets' - fsvs ps fsvs:commit-pipe /var/lib/fsvs-versioning/scripts/remove-password-line.pl ddclient.conf + fsvs ps fsvs:commit-pipe /var/lib/fsvs-versioning/scripts/remove-password-line.pl ddclient.conf || true # Are there non-shadow systems? # fsvs ignore './shadow' './gshadow' - fsvs ps fsvs:commit-pipe /var/lib/fsvs-versioning/scripts/shadow-clean.pl shadow gshadow + fsvs ps fsvs:commit-pipe /var/lib/fsvs-versioning/scripts/shadow-clean.pl shadow gshadow + +# Ignore entries that are not world-readable. + fsvs ignore 'm:4:0' # Lock-files are not needed, are they? fsvs ignore './**.lock' './**.LOCK' diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/example/var/lib/fsvs-versioning/scripts/commit.sh /tmp/FWQOjxAJQ0/fsvs-1.1.17/example/var/lib/fsvs-versioning/scripts/commit.sh --- fsvs-1.1.14/example/var/lib/fsvs-versioning/scripts/commit.sh 2008-02-22 19:37:18.000000000 +0000 +++ fsvs-1.1.17/example/var/lib/fsvs-versioning/scripts/commit.sh 2008-08-16 07:23:30.000000000 +0100 @@ -1,4 +1,16 @@ #!/bin/sh -fsvs ci /etc -m "${1:-Auto-commit after dpkg}" -q +# So that the defined group can access the data +umask 007 + +# In case the process calling apt-get had some paths defined, they might +# not be what FSVS expects. +# Re-set the defaults. +export FSVS_CONF=/etc/fsvs +export FSVS_WAA=/var/spool/fsvs/ +# Possibly run this script or FSVS via env(1)? +# Would clean *all* FSVS_* variables. + +# Tell the author as "apt", because we're called by apt-get. 
+fsvs ci -o author=apt /etc -m "${1:-Auto-commit after dpkg}" -q diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/example/var/lib/fsvs-versioning/scripts/remove-password-line.pl /tmp/FWQOjxAJQ0/fsvs-1.1.17/example/var/lib/fsvs-versioning/scripts/remove-password-line.pl --- fsvs-1.1.14/example/var/lib/fsvs-versioning/scripts/remove-password-line.pl 2008-02-22 19:37:18.000000000 +0000 +++ fsvs-1.1.17/example/var/lib/fsvs-versioning/scripts/remove-password-line.pl 2008-05-26 14:12:12.000000000 +0100 @@ -3,6 +3,6 @@ while (<>) { # No substitution value, could be used wrongly - s#^(\s+password\s+=).*#\1#; + s#^(\s*password\s*=).*#\1#; print; } diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/example/var/lib/fsvs-versioning/scripts/shadow-clean.pl /tmp/FWQOjxAJQ0/fsvs-1.1.17/example/var/lib/fsvs-versioning/scripts/shadow-clean.pl --- fsvs-1.1.14/example/var/lib/fsvs-versioning/scripts/shadow-clean.pl 2008-02-22 19:37:18.000000000 +0000 +++ fsvs-1.1.17/example/var/lib/fsvs-versioning/scripts/shadow-clean.pl 2008-04-18 06:13:36.000000000 +0100 @@ -5,7 +5,7 @@ while (<>) { - @f=split(/:/); - $f[1]='-' if length($f[1]) > 1; - print; + @f=split(/(:)/); + $f[2]='-' if length($f[2]) > 1; + print join("", @f); } diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/Makefile /tmp/FWQOjxAJQ0/fsvs-1.1.17/Makefile --- fsvs-1.1.14/Makefile 2007-07-12 06:20:49.000000000 +0100 +++ fsvs-1.1.17/Makefile 2008-05-20 11:33:22.000000000 +0100 @@ -16,3 +16,8 @@ configure: configure.in @echo Generating configure. autoconf + +distclean: + rm -f config.cache config.log config.status 2> /dev/null || true + rm -f src/Makefile src/tags tests/Makefile 2> /dev/null || true + rm -f src/config.h src/*.[os] src/.*.d src/fsvs 2> /dev/null || true diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/README /tmp/FWQOjxAJQ0/fsvs-1.1.17/README --- fsvs-1.1.14/README 2008-01-13 16:27:04.000000000 +0000 +++ fsvs-1.1.17/README 2008-10-25 12:14:45.000000000 +0100 @@ -25,15 +25,16 @@ What does it do? ---------------- -fsvs is a backup/archival-tool, which uses subversion backends for storage. -This means that previous versions of all files are available in case of -hardware problems, data loss, virus infections, user problems etc. +FSVS is a backup/archival/versioning tool, which uses subversion backends +for storage. This means that previous versions of all files are available +in case of hardware problems, data loss, virus infections, user problems etc. -fsvs is used to take snapshots of the current machine and restore them; +FSVS is used to take snapshots of the current machine and restore them; all advanced operations (taking diffs, merging, etc.) should be done via some repository browser. -fsvs runs currently only on linux. +FSVS runs currently on Linux, OpenBSD and OS X, and I think it works with +Solaris, too - in short, UNIX should be fine. Why was it written? @@ -54,26 +55,37 @@ First install subversion (and, by implication, apr). Next compile fsvs. - cd src - make + cd src + make +And install the binary: (the man-pages are not automatically installed yet.) + make install Make a repository somewhere, preferably on another machine. - svnadmin create /path/to/repos + svnadmin create /path/to/repos Create a local directory for the "working copy administrative area". If you'd like to use another path, just set the environment variable WAA to it. 
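For example, a minimal sketch using a non-default location (the path
/home/backup/fsvs-waa is only an illustration; the commit.sh example in
this package pins the same setting via FSVS_WAA):

	export WAA=/home/backup/fsvs-waa
	mkdir -p "$WAA"
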
- mkdir -p /var/spool/fsvs + mkdir -p /var/spool/fsvs /etc/fsvs Go to the base path for versioning: - cd / + cd / Tell fsvs which URL it should use: - fsvs url svn+ssh://username@machine/path/to/repos + fsvs url svn+ssh://username@machine/path/to/repos Define ignore patterns - all virtual filesystems (/proc, /sys, etc.), and (assuming that you're in / currently) the temporary files in /tmp: - fsvs ignore DEVICE:0 ./tmp/* + fsvs ignore DEVICE:0 ./tmp/* And you're ready to play! Check your data in: - fsvs commit -m "First import" + fsvs commit -m "First import" -See doc/USAGE for more details. +See the files in doc for more details; here, as (ordered) list: + fsvs.1 - Manual page; describes FSVS' commands + USAGE - Manual page in ASCII + IGNORING - Why/how to ignore entries + fsvs-url-format.5 - Detailed description of FSVS' URLs definitions + fsvs-options.5 - Options for FSVS (command line, config file) + fsvs-howto-backup.5 - A short HOWTO. + +These documents can be browsed in HTML on http://doc.fsvs-software.org/, +too. (And they're a bit more readable there.) If it bails out with an error, I'd appreciate if you'd run the failing command with the option "-v" (verbose) and send the last lines to the developers diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/ac_list.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/ac_list.c --- fsvs-1.1.14/src/ac_list.c 2008-03-25 06:25:20.000000000 +0000 +++ fsvs-1.1.17/src/ac_list.c 2008-10-25 12:13:12.000000000 +0100 @@ -13,6 +13,7 @@ #include "update.h" #include "export.h" #include "log.h" +#include "cat.h" #include "ignore.h" #include "cp_mv.h" #include "sync.h" @@ -35,32 +36,35 @@ /** Array of command name pointers. * The \c acl at the beginning means ACtion List. */ static const char - *acl_status[] = { "status", NULL }, - *acl_commit[] = { "commit", "checkin", "ci", NULL }, - *acl_update[] = { "update", NULL }, - *acl_export[] = { "export", NULL }, - *acl_build[] = { "_build-new-list", NULL }, - *acl_remote[] = { "remote-status", "rs", NULL }, - *acl_ignore[] = { "ignore", NULL }, - *acl_add[] = { "add", NULL }, - *acl_copyfr[] = { "copyfrom-detect", "copy-detect", NULL }, - *acl_cp[] = { "copy", "move", "cp", "mv", NULL }, - *acl_unvers[] = { "unversion", NULL }, - *acl_log[] = { "log", NULL }, - *acl_resolv[] = { "resolved", NULL }, - *acl_checko[] = { "checkout", "co", NULL }, - *acl_sync_r[] = { "sync-repos", NULL }, - *acl_revert[] = { "revert", "undo", NULL }, - *acl_prop_l[] = { "prop-list", "pl", NULL }, - *acl_prop_g[] = { "prop-get", "pg", NULL }, - *acl_prop_s[] = { "prop-set", "ps", NULL }, - *acl_prop_d[] = { "prop-del", "pd", NULL }, - *acl_diff[] = { "diff", NULL }, - *acl_help[] = { "help", "?", NULL }, - *acl_mergelist[] UNUSED = { "mergelist", NULL }, - *acl_info[] = { "info", NULL }, + *acl_status[] = { "status", NULL }, + *acl_commit[] = { "commit", "checkin", "ci", NULL }, + *acl_update[] = { "update", NULL }, + *acl_export[] = { "export", NULL }, + *acl_build[] = { "_build-new-list", NULL }, + *acl_delay[] = { "delay", NULL }, + *acl_remote[] = { "remote-status", "rs", NULL }, + *acl_ignore[] = { "ignore", NULL }, + *acl_rign[] = { "rel-ignore", "ri", "r-i", NULL }, + *acl_add[] = { "add", NULL }, + *acl_copyfr[] = { "copyfrom-detect", "copy-detect", NULL }, + *acl_cp[] = { "copy", "move", "cp", "mv", NULL }, + *acl_uncp[] = { "uncopy", NULL }, + *acl_unvers[] = { "unversion", NULL }, + *acl_log[] = { "log", NULL }, + *acl_cat[] = { "cat", NULL }, + *acl_resolv[] = { "resolved", NULL }, + *acl_checko[] = { "checkout", "co", NULL }, + *acl_sync_r[] 
= { "sync-repos", NULL }, + *acl_revert[] = { "revert", "undo", NULL }, + *acl_prop_l[] = { "prop-list", "pl", NULL }, + *acl_prop_g[] = { "prop-get", "pg", NULL }, + *acl_prop_s[] = { "prop-set", "ps", NULL }, + *acl_prop_d[] = { "prop-del", "pd", NULL }, + *acl_diff[] = { "diff", NULL }, + *acl_help[] = { "help", "?", NULL }, + *acl_info[] = { "info", NULL }, /** \todo: remove initialize */ - *acl_urls[] = { "urls", "initialize", NULL }; + *acl_urls[] = { "urls", "initialize", NULL }; /* A generated file. */ @@ -75,8 +79,11 @@ .work=_work, .local_callback=_act, \ __VA_ARGS__ } +/** Use the progress uninitializer */ #define UNINIT .local_uninit=st__progress_uninit +/** Store update-pipe strings */ #define DECODER .needs_decoder=1 +/** Commands obeys filtering via -f */ #define FILTER .only_opt_filter=1 @@ -84,23 +91,27 @@ struct actionlist_t action_list[]= { /* The first action is the default. */ - ACT(status, st__work, st__status, FILTER), + ACT(status, st__work, st__action, FILTER), ACT(commit, ci__work, ci__action, UNINIT, FILTER), ACT(update, up__work, st__progress, UNINIT, DECODER), ACT(export, exp__work, NULL, .is_import_export=1, DECODER), ACT(unvers, au__work, au__action, .i_val=RF_UNVERSION), ACT( add, au__work, au__action, .i_val=RF_ADD), - ACT( diff, df__work, df__action, DECODER), - ACT(sync_r, sync__work, NULL, .repos_feedback=sync__progress), + ACT( diff, df__work, NULL, DECODER), + ACT(sync_r, sync__work, NULL, .repos_feedback=sync__progress, .keep_user_prop=1), ACT( urls, url__work, NULL), - ACT(revert, rev__work, rev__action, UNINIT, DECODER, .keep_children=1), + ACT(revert, rev__work, NULL, UNINIT, DECODER, .keep_children=1), ACT(ignore, ign__work, NULL), + ACT( rign, ign__rign, NULL), ACT(copyfr, cm__detect, st__progress, UNINIT), ACT( cp, cm__work, NULL), + ACT( cat, cat__work, NULL), + ACT( uncp, cm__uncopy, NULL), ACT(resolv, res__work, res__action, .is_compare=1), ACT( log, log__work, NULL), ACT(checko, co__work, NULL, DECODER, .repos_feedback=st__rm_status), ACT( build, bld__work, st__status), + ACT( delay,delay__work, st__status), /* For help we set import_export, to avoid needing a WAA * (default /var/spool/fsvs) to exist. */ ACT( help, ac__Usage, NULL, .is_import_export=1), diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/actions.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/actions.c --- fsvs-1.1.14/src/actions.c 2008-03-25 06:20:11.000000000 +0000 +++ fsvs-1.1.17/src/actions.c 2008-10-25 12:11:00.000000000 +0100 @@ -21,7 +21,7 @@ * Common functions for action (name) handling. */ -/** This wrapper-callback for the current action callback calculates the \a +/** This wrapper-callback for the current action callback calculates the * path and fills in the \c entry_type for the current \a sts, if * necessary. */ int ac__dispatch(struct estat *sts) @@ -31,21 +31,29 @@ status=0; if (!action->local_callback) goto ex; - /* remove all variables */ - switch (sts->entry_type) + /* We cannot really test the type here; on update we might only know that + * it's a special file, but not which type exactly. 
*/ +#if 0 + BUG_ON(!( + S_ISDIR(sts->updated_mode) || S_ISREG(sts->updated_mode) || + S_ISCHR(sts->updated_mode) || S_ISBLK(sts->updated_mode) || + S_ISLNK(sts->updated_mode) ), + "%s has mode 0%o", sts->name, sts->updated_mode); +#endif + + if (ops__allowed_by_filter(sts) || + (sts->entry_status & FS_CHILD_CHANGED)) { - case FT_UNKNOWN: - case FT_ANY: - case FT_NONDIR: - sts->entry_type=ops___filetype(&(sts->st)); + /* If + * - we want to see all entries, + * - there's no parent that could be removed ("." is always there), or + * - the parent still exists, + * we print the entry. */ + if (opt__get_int(OPT__ALL_REMOVED) || + !sts->parent || + (sts->parent->entry_status & FS_REPLACED)!=FS_REMOVED) + STOPIF( action->local_callback(sts), NULL); } - - /* If we're filtering, */ - /* We want all entries (eg. -v) */ - if (opt__get_int(OPT__FILTER) == FILTER__ALL || - /* or it's an interesting entry. */ - (sts->entry_status & opt__get_int(OPT__FILTER))) - STOPIF( action->local_callback(sts), NULL); else { DEBUGP("%s is not the entry you're looking for", sts->name); diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/actions.h /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/actions.h --- fsvs-1.1.14/src/actions.h 2008-03-25 06:20:11.000000000 +0000 +++ fsvs-1.1.17/src/actions.h 2008-08-02 16:10:38.000000000 +0100 @@ -15,13 +15,29 @@ /** \file * Action handling header file. */ -/** \name Action callbacks. */ +/** \anchor callbacks \name callbacks Action callbacks. */ /** @{ */ -/** One to show progress. +/** Callback that gets called for each entry. + * + * Entries get read from the entry list in global [device, inode] order; in + * the normal action callback (\ref actionlist_t::local_callback and \ref + * actionlist_t::repos_feedback) the parent entries are handled \b after child + * entries (but the parent \c struct \ref estat "estats" exist, of course), + * so that the list of children is correct. + * + * + * See also \ref waa__update_tree. + * * The full (wc-based) path can be built as required by \ref * ops__build_path().*/ +/* unused, wrong doc + * As in the entry list file (\ref dir) there is a callback \ref + * actionlist_t::early_entry that's done \b before the child entries; + * Clearing \ref estat::do_this_entry and \ref estat::do_tree in this + * callback will skip calling \ref actionlist_t::local_callback for this and + * the child entries (see \ref ops__set_to_handle_bits()). */ typedef int (action_t)(struct estat *sts); -/** One for working */ +/** Callback for initializing the action. */ typedef int (work_t)(struct estat *root, int argc, char *argv[]); /** One after all progress has been made. */ typedef int (action_uninit_t)(void); @@ -44,9 +60,19 @@ { /** Array of names this action will be called on the command line. */ const char** name; - /** The function doing the setup, tear down, and in-between - the real - * worker. */ + + /** The function doing the setup, tear down, and in-between - the + * worker main function. + * + * See \ref callbacks. */ work_t *work; + + /** The output function for repository accesses. + * Currently only used in cb__record_changes(). + * + * See \ref callbacks. */ + action_t *repos_feedback; + /** The local callback. * Called for each entry, just after it's been checked for changes. 
* Should give the user feedback about individual entries and what @@ -57,37 +83,42 @@ * \note A removed directory is taken as empty (as no more elements are * here) - this is used in \ref revert so that revert gets called twice * (once for restoring the directory itself, and again after its - * populated). */ + * populated). + * + * See \ref callbacks. */ action_t *local_callback; - /** The output function for repository accesses. - * Currently only used in cb__record_changes(). */ - action_t *repos_feedback; /** The progress reporter needs a callback to clear the line after printing * the progress. */ action_uninit_t *local_uninit; + + /** A pointer to the verbose help text. */ + char const *help_text; + + /** Flag for usage in the action handler itself. */ + int i_val; + /** Is this an import or export, ie do we need a WAA? * We don't cache properties, manber-hashes, etc., if is_import_export * is set. */ - int is_import_export; + int is_import_export:1; /** This is set if it's a compare operation (remote-status). * The properties are parsed, but instead of writing them into the * \c struct \c estat they are compared, and \c entry_status set * accordingly. */ - int is_compare; - /** A pointer to the verbose help text. */ - char const *help_text; + int is_compare:1; /** Whether we need fsvs:update-pipe cached. * Do we install files from the repository locally? Then we need to know * how to decode them. * We don't do that in every case, to avoid wasting memory. */ - int needs_decoder; + int needs_decoder:1; /** Whether the entries should be filtered on opt_filter. */ - int only_opt_filter; + int only_opt_filter:1; + /** Whether user properties should be stored in estat::user_prop while + * running cb__record_changes(). */ + int keep_user_prop:1; /** Makes ops__update_single_entry() keep the children of removed * directories. */ - int keep_children; - /** Flag for usage in the action handler itself. */ - int i_val; + int keep_children:1; }; diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/add_unvers.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/add_unvers.c --- fsvs-1.1.14/src/add_unvers.c 2008-03-25 06:20:11.000000000 +0000 +++ fsvs-1.1.17/src/add_unvers.c 2008-10-25 12:11:00.000000000 +0100 @@ -141,6 +141,8 @@ * */ +/** General function for \ref add and \ref unversion actions. + * This one really handles the entries. */ int au__action(struct estat *sts) { int status; diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/build.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/build.c --- fsvs-1.1.14/src/build.c 2008-02-19 05:57:01.000000000 +0000 +++ fsvs-1.1.17/src/build.c 2008-10-25 12:11:00.000000000 +0100 @@ -6,8 +6,11 @@ * published by the Free Software Foundation. ************************************************************************/ +#include + #include "global.h" #include "waa.h" +#include "helper.h" #include "url.h" #include "build.h" @@ -25,6 +28,8 @@ * In production it should not be used - as the revision of the entries * is unknown, we can only use 0 - and loose information this way! */ +/** Traverse the filesystem, build a tree, and store it as WC. + * Doesn't do anything with the repository. */ int bld__work(struct estat *root, int argc, char *argv[]) { int status; @@ -42,3 +47,75 @@ return status; } + +/** \addtogroup cmds + * \section delay + * + * This command delays execution until the time has passed at least to the + * next second after writing the \ref dir "dir" and \ref urls "urls" files. 
+ * So, where previously the \ref delay "delay" option was used, this can be + * substituted by the given command followed by the \c delay command. + * + * The advantage is over the \ref o_delay option is, that read-only + * commands can be used in the meantime. + * + * An example: + * \code + * fsvs commit /etc/X11 -m "Backup of X11" + * ... read-only commands, like "status" + * fsvs delay /etc/X11 + * ... read-write commands, like "commit" + * \endcode + * + * In the testing framework it is used to save a bit of time; in normal + * operation, where \c fsvs commands are not so tightly packed, it is + * normally preferable to use the \ref o_delay "delay" option. */ +/** Waits until the \c dir and \c Urls files have been modified in the + * past, ie their timestamp is lower than the current time (rounded to + * seconds.) */ +int delay__work(struct estat *root, int argc, char *argv[]) +{ + int status; + int i; + time_t last; + struct sstat_t st; + char *filename, *eos; + char *list[]= { WAA__DIR_EXT, WAA__URLLIST_EXT }; + + + STOPIF( waa__find_base(root, &argc, &argv), NULL); + if (opt_verbose) + printf("Waiting on WC root \"%s\"\n", wc_path); + + last=0; + for(i=0; i last) last=st.mtim.tv_sec; + } + } + + DEBUGP("waiting until %llu", (t_ull)last); + opt__set_int(OPT__DELAY, PRIO_MUSTHAVE, -1); + STOPIF( hlp__delay(last, 1), NULL); + +ex: + return status; +} + diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/build.h /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/build.h --- fsvs-1.1.14/src/build.h 2008-02-19 05:57:01.000000000 +0000 +++ fsvs-1.1.17/src/build.h 2008-10-25 12:09:08.000000000 +0100 @@ -25,6 +25,8 @@ /** Build action. */ work_t bld__work; +/** Delay action. */ +work_t delay__work; #endif diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/cache.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/cache.c --- fsvs-1.1.14/src/cache.c 2008-02-19 05:57:01.000000000 +0000 +++ fsvs-1.1.17/src/cache.c 2008-10-25 12:11:00.000000000 +0100 @@ -65,7 +65,7 @@ (ce->len - len) > 1024) { /* Round up a bit (including the struct). */ - alloc_len = (alloc_len + 256-1) & ~64; + alloc_len = (alloc_len + 96-1) & ~64; if (copy_old_data) ce=realloc(ce, alloc_len); @@ -209,17 +209,20 @@ /** A simple hash. * Copies the significant bits ' ' .. 'Z' (or, really, \\x20 .. \\x60) of - * at most 5 bytes into a packed bitfield, so that 30bits are used. */ + * at most 6 bytes of \a stg into a packed bitfield, so that 30bits are + * used. */ inline cache_value_t cch___string_to_cv(const char *stg) { union { cache_value_t cv; struct { - int c0:5; - int c1:5; - int c2:5; - int c3:5; - int c4:5; + unsigned int c0:5; + unsigned int c1:5; + unsigned int c2:5; + unsigned int c3:5; + unsigned int c4:5; + unsigned int c5:5; + unsigned int ignore_me:2; }; } __attribute__((packed)) result; @@ -234,7 +237,9 @@ { result.c3 = *(stg++) - 0x20; if (*stg) { result.c4 = *(stg++) - 0x20; - } } } } } + if (*stg) + { result.c5 = *(stg++) - 0x20; + } } } } } } return result.cv; } diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/cache.h /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/cache.h --- fsvs-1.1.14/src/cache.h 2008-02-19 05:57:01.000000000 +0000 +++ fsvs-1.1.17/src/cache.h 2008-10-25 12:11:00.000000000 +0100 @@ -25,10 +25,10 @@ struct cache_entry_t { /** ID of entry */ cache_value_t id; - /** Length of data */ - int len; /** User-data for hashes */ cache_value_t hash_data; + /** Length of data */ + int len; #if 0 /** Measurement of accesses */ short accessed; @@ -75,13 +75,14 @@ * } * \enddot * */ int lru; + /** Cache entries, \c NULL terminated. 
*/ struct cache_entry_t *entries[CACHE_DEFAULT+1]; }; -/** Adds a copy of the given data to the cache; return the new data - * pointer. +/** Adds a copy of the given data (\a id, \a data with \a len) to the \a + * cache; return the new allocated data pointer in \a copy. * */ int cch__add(struct cache_t *cache, cache_value_t id, const char *data, int len, diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/cat.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/cat.c --- fsvs-1.1.14/src/cat.c 1970-01-01 01:00:00.000000000 +0100 +++ fsvs-1.1.17/src/cat.c 2008-10-25 12:12:13.000000000 +0100 @@ -0,0 +1,85 @@ +/************************************************************************ + * Copyright (C) 2008 Philipp Marek. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 3 as + * published by the Free Software Foundation. + ************************************************************************/ + +#include +#include + + +#include "global.h" +#include "waa.h" +#include "revert.h" +#include "url.h" +#include "est_ops.h" + + +/** \file + * \ref cat action. + * + * \todo \code + * fsvs cat [-r rev] [-u URLname] path + * fsvs cat [-u URLname:rev] path + * \endcode + * */ + +/** + * \addtogroup cmds + * + * \section cat + * + * \code + * fsvs cat [-r rev] path + * \endcode + * + * Fetches a file with the specified revision or, if not given, BASE, from + * the repository, and outputs it to \c STDOUT. + * + * */ + +/** -. + * Main function. */ +int cat__work(struct estat *root, int argc, char *argv[]) +{ + int status; + char **normalized; + struct estat *sts; + struct svn_stream_t *output; + svn_error_t *status_svn; + + + status=0; + STOPIF_CODE_ERR( argc != 1, EINVAL, + "!Exactly a single path must be given."); + + STOPIF_CODE_ERR( opt_target_revisions_given > 1, EINVAL, + "!At most a single revision is allowed."); + + STOPIF( waa__find_common_base(argc, argv, &normalized), NULL); + STOPIF( url__load_list(NULL, 0), NULL); + STOPIF( waa__input_tree( root, NULL, NULL), NULL); + + STOPIF( ops__traverse(root, normalized[0], + OPS__FAIL_NOT_LIST, 0, &sts), NULL); + + current_url=sts->url; + STOPIF_CODE_ERR( !current_url, EINVAL, + "!For this entry no URL is known."); + + STOPIF( url__open_session(NULL), NULL); + STOPIF_SVNERR( svn_stream_for_stdout, + (&output, global_pool)); + + STOPIF( rev__get_text_to_stream( normalized[0], + opt_target_revisions_given ? + opt_target_revision : sts->repos_rev, + DECODER_UNKNOWN, + output, NULL, NULL, NULL, global_pool), NULL); + +ex: + return status; +} + diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/cat.h /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/cat.h --- fsvs-1.1.14/src/cat.h 1970-01-01 01:00:00.000000000 +0100 +++ fsvs-1.1.17/src/cat.h 2008-10-10 15:56:18.000000000 +0100 @@ -0,0 +1,19 @@ +/************************************************************************ + * Copyright (C) 2005-2008 Philipp Marek. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 3 as + * published by the Free Software Foundation. + ************************************************************************/ + +#ifndef __CACHE_H__ +#define __CACHE_H__ + + +/** \file + * \ref cat action header file. 
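+ *
+ * A possible invocation, sketched with a placeholder revision and path:
+ * \code
+ * fsvs cat -r 17 ./etc/hosts
+ * \endcode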
*/ + +work_t cat__work; + +#endif + diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/checksum.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/checksum.c --- fsvs-1.1.14/src/checksum.c 2008-03-17 06:07:07.000000000 +0000 +++ fsvs-1.1.17/src/checksum.c 2008-10-25 12:16:24.000000000 +0100 @@ -14,6 +14,7 @@ #include #include "checksum.h" +#include "helper.h" #include "global.h" #include "est_ops.h" #include "waa.h" @@ -247,7 +248,10 @@ * file has been checked already and fullpath is \c NULL, a debug message * can write \c (null), as then even the name calculation * is skipped. - * \param result is set to 0 for identical to old or !=0 for changed. + * \param result is set to \c 0 for identical to old and \c >0 for + * changed. + * As a special case this function returns \c <0 for don't know + * if the file is unreadable due to a \c EACCESS. * * \note Performance optimization * In normal circumstances not the whole file has to be read to get the @@ -264,10 +268,13 @@ unsigned char *filedata; int do_manber; char *cp; + struct sstat_t actual; md5_digest_t old_md5 = { 0 }; static struct t_manber_data mb_dat; + /* Default is "don't know". */ + if (result) *result = -1; if (S_ISDIR(sts->st.mode)) return 0; fh=-1; @@ -288,7 +295,12 @@ DEBUGP("hashing %s",fullpath); memcpy(old_md5, sts->md5, sizeof(old_md5)); - if (S_ISREG(sts->st.mode)) + /* We'll open and read the file now, so the additional lstat() doesn't + * really hurt - and it makes sure that we see the current values (or at + * least the _current_ ones :-). */ + STOPIF( hlp__lstat(fullpath, &actual), NULL); + + if (S_ISREG(actual.mode)) { do_manber=1; /* Open the file and read the stream from there, comparing the blocks @@ -296,7 +308,7 @@ * If a difference is found, stop, and mark file as different. */ /* If this call returns ENOENT, this entry simply has no md5s-file. * We'll have to MD5 it completely. */ - if (sts->st.size < CS__MIN_FILE_SIZE) + if (actual.size < CS__MIN_FILE_SIZE) do_manber=0; else { @@ -314,14 +326,30 @@ current_pos=0; fh=open(fullpath, O_RDONLY); - STOPIF_CODE_ERR(fh < 0, errno, - "open(\"%s\", O_RDONLY) failed", fullpath); + /* We allow a single special case on error handling: EACCES, which + * could simply mean that the file has mode 000. */ + if (fh<0) + { + /* The debug statement might change errno, so we have to save the + * value. */ + status=errno; + DEBUGP("File %s is unreadable: %d", fullpath, status); + if (status == EACCES) + { + status=0; + goto ex; + } + + /* Can that happen? */ + if (!status) status=EBUSY; + STOPIF(status, "open(\"%s\", O_RDONLY) failed", fullpath); + } status=0; - while (current_pos < sts->st.size) + while (current_pos < actual.size) { - if (sts->st.size-current_pos < MAPSIZE) - length_mapped=sts->st.size-current_pos; + if (actual.size-current_pos < MAPSIZE) + length_mapped=actual.size-current_pos; else length_mapped=MAPSIZE; DEBUGP("mapping %u bytes from %llu", diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/checksum.h /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/checksum.h --- fsvs-1.1.14/src/checksum.h 2008-02-19 05:57:01.000000000 +0000 +++ fsvs-1.1.17/src/checksum.h 2008-04-22 08:15:56.000000000 +0100 @@ -21,9 +21,6 @@ * It stores the CRCs and MD5s of the manber-blocks of this file. */ struct cs__manber_hashes { - /** Number of manber-hash-entries stored */ - unsigned count; - /** The manber-hashes */ AC_CV_C_UINT32_T *hash; /** The MD5-digests */ @@ -33,6 +30,9 @@ off_t *end; /** The index into the above arrays - sorted by manber-hash. 
*/ AC_CV_C_UINT32_T *index; + + /** Number of manber-hash-entries stored */ + unsigned count; }; diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/commit.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/commit.c --- fsvs-1.1.14/src/commit.c 2008-03-25 06:20:11.000000000 +0000 +++ fsvs-1.1.17/src/commit.c 2008-10-25 12:17:41.000000000 +0100 @@ -60,10 +60,6 @@ * If you're currently in \c /etc , you can even drop the \c /etc/ in * front, and just use the filenames. * - * This extended path handling on the commandline is not yet available for - * every command. Most of them still expect you to be in the - * working copy root. - * * Please see \ref status for explanations on \c -v and \c -C . * For advanced backup usage see also \ref FSVS_PROP_COMMIT_PIPE. * */ @@ -100,7 +96,7 @@ -/** Typedef needed for \a ci__set_props() and \a prp__send(). See there. */ +/** Typedef needed for \a ci___send_user_props(). See there. */ typedef svn_error_t *(*change_any_prop_t) (void *baton, const char *name, const svn_string_t *value, @@ -175,17 +171,8 @@ "!The entry \"%s\" is still marked as conflict.", path); if (sts->entry_status || - (sts->flags & (RF_ADD | RF_UNVERSION | RF_PUSHPROPS)) ) - { - /* mark the entry as to-be-done. - * mark the parents too, so that we don't have to search - * in-depth. */ - while (sts->parent && !(sts->parent->entry_status & FS_CHILD_CHANGED)) - { - sts->parent->entry_status |= FS_CHILD_CHANGED; - sts=sts->parent; - } - } + (sts->flags & RF___COMMIT_MASK) ) + ops__mark_parent_cc(sts, entry_status); STOPIF( st__progress(sts), NULL); @@ -210,7 +197,7 @@ /* Set the current url for this entry. */ root->url=current_url; - if (root->entry_type == FT_DIR && root->entry_count) + if (S_ISDIR(root->st.mode) && root->entry_count) { sts=root->by_inode; while (*sts) @@ -227,7 +214,11 @@ /** Send the user-defined properties. - * See also \a ci__set_props(). */ + * + * The property table is left cleaned up, ie. any deletions that were + * ordered by the user have been done -- no properties with \c + * prp__prop_will_be_removed() will be here. + * */ int ci___send_user_props(void *baton, struct estat *sts, change_any_prop_t function, @@ -245,7 +236,7 @@ /* Do user-defined properties. * Could return ENOENT if none. */ - status=prp__open_byestat( sts, GDBM_READER, &db); + status=prp__open_byestat( sts, GDBM_WRITER, &db); DEBUGP("prop open: %d", status); if (status == ENOENT) status=0; @@ -257,16 +248,24 @@ while (status==0) { STOPIF( prp__fetch(db, key, &value), NULL); - str=svn_string_ncreate(value.dptr, value.dsize-1, pool); if (hlp__is_special_property_name(key.dptr)) { DEBUGP("ignoring %s - should not have been taken?", key.dptr); } + else if (prp__prop_will_be_removed(value)) + { + DEBUGP("removing property %s", key.dptr); + + STOPIF_SVNERR( function, (baton, key.dptr, NULL, pool) ); + STOPIF( hsh__register_delete(db, key), NULL); + } else { DEBUGP("sending property %s=(%d)%.*s", key.dptr, value.dsize, value.dsize, value.dptr); + + str=svn_string_ncreate(value.dptr, value.dsize-1, pool); STOPIF_SVNERR( function, (baton, key.dptr, str, pool) ); } @@ -282,9 +281,15 @@ } if (props) + { + STOPIF( hsh__collect_garbage(db, NULL), NULL); *props=db; + } else - STOPIF(hsh__close(db, status), NULL); + { + /* A hsh__close() does the garbage collection, too. */ + STOPIF( hsh__close(db, status), NULL); + } ex: return status; @@ -313,7 +318,7 @@ status=0; /* The meta-data properties are not sent for a symlink. 
*/ - if (sts->entry_type != FT_SYMLINK) + if (!S_ISLNK(sts->updated_mode)) { /* owner */ str=svn_string_createf (pool, "%u %s", @@ -392,7 +397,7 @@ STOPIF( ci___send_user_props(baton, sts, editor->change_file_prop, &db, pool), NULL); - if (sts->entry_type != FT_SYMLINK) + if (!S_ISLNK(sts->updated_mode)) STOPIF_SVNERR( ci___set_props, (baton, sts, editor->change_file_prop, pool) ); @@ -434,9 +439,9 @@ else { has_manber=0; - switch (sts->entry_type) + switch (sts->st.mode & S_IFMT) { - case FT_SYMLINK: + case S_IFLNK: STOPIF( ops__link_to_string(sts, filename, &cp), NULL); STOPIF( hlp__local2utf8(cp, &cp, -1), NULL); /* It is not defined whether svn_stringbuf_create copies the string, @@ -444,14 +449,14 @@ * Knowing people wanted. */ str=svn_stringbuf_create(cp, pool); break; - case FT_BDEV: - case FT_CDEV: + case S_IFBLK: + case S_IFCHR: /* See above */ /* We only put ASCII in this string */ str=svn_stringbuf_create( ops__dev_to_filedata(sts), pool); break; - case FT_FILE: + case S_IFREG: STOPIF( apr_file_open(&a_stream, filename, APR_READ, 0, pool), "open file \"%s\" for reading", filename); @@ -468,17 +473,20 @@ if (transfer_text) { status= prp__get(db, propval_commitpipe, &encoder_prop); + /* The user-defined properties have already been sent, so the + * propval_commitpipe would already be cleared; we don't need to + * check for prp__prop_will_be_removed(). */ if (status == 0) { STOPIF( hlp__encode_filter(s_stream, encoder_prop.dptr, 0, - &s_stream, &encoder, pool), NULL ); + filename, &s_stream, &encoder, pool), NULL ); encoder->output_md5= &(sts->md5); } } break; default: - BUG("invalid/unknown file type 0x%X", sts->entry_type); + BUG("invalid/unknown file type 0%o", sts->st.mode); } /* for special nodes */ @@ -570,10 +578,10 @@ char *filename; char* utf8_filename; svn_error_t *status_svn; - struct sstat_t dummy_stat64; char *src_path; svn_revnum_t src_rev; - + struct sstat_t stat; + status=0; subpool=NULL; @@ -582,7 +590,6 @@ { sts=dir->by_inode[i]; - /* The flags are stored persistently; we have to check whether this * entry shall be committed. */ if ( (sts->flags & RF___COMMIT_MASK) && sts->do_this_entry) @@ -596,6 +603,8 @@ { /* The entry_status is set depending on the do_this_entry already; * if it's not 0, it's got to be committed. */ + /* Maybe a child needs attention (with FS_CHILD_CHANGED), so we have + * to recurse. */ } else /* Completely ignore item if nothing to be done. */ @@ -612,10 +621,12 @@ /* as the path needs to be canonical we strip the ./ in front */ STOPIF( hlp__local2utf8(filename+2, &utf8_filename, -1), NULL ); - STOPIF( st__status(sts), NULL); + DEBUGP("%s: action %X, updated mode 0%o, flags %X, filter %d", + filename, sts->entry_status, sts->updated_mode, sts->flags, + ops__allowed_by_filter(sts)); - DEBUGP("%s: action is %X, type is %X, flags %X", - filename, sts->entry_status, sts->entry_type, sts->flags); + if (ops__allowed_by_filter(sts)) + STOPIF( st__status(sts), NULL); exists_now= !(sts->flags & RF_UNVERSION) && ( (sts->entry_status & (FS_NEW | FS_CHANGED | FS_META_CHANGED)) || @@ -667,7 +678,7 @@ /* access() would possibly be a bit lighter, but doesn't work * for broken symlinks. */ /* TODO: Could we use FS_REMOVED here?? */ - if (hlp__lstat(filename, &dummy_stat64)) + if (hlp__lstat(filename, &stat)) { /* If an entry doesn't exist, but *should*, as it's marked RF_ADD, * we fail (currently). 
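The hunks around here deal with committing only part of a working copy
(see the RF_CHECK handling in the following hunks). As a usage-level
sketch only, repeating the invocation already shown in the delay
documentation, with /etc/X11 as a placeholder path:

	fsvs commit /etc/X11 -m "Backup of X11"
	# or, from inside the working copy, with the leading /etc/ dropped:
	cd /etc && fsvs commit X11 -m "Backup of X11"
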
@@ -680,11 +691,20 @@ continue; } + /* In case this entry is a directory that's only done because of its + * children we shouldn't change its known data - we'd silently change + * eg. the mtime. */ + if (sts->do_this_entry && ops__allowed_by_filter(sts)) + { + sts->st=stat; + DEBUGP("set st for %s", sts->name); + } + /* We need a baton. */ baton=NULL; - /* If this entry has the RF_ADD flag set, is the base entry for a copy, - * or is FS_NEW, it is new (as far as subversion is concerned). + /* If this entry has the RF_ADD or RF_COPY_BASE flag set, or is FS_NEW, + * it is new (as far as subversion is concerned). * If this is an implicitly copied entry, subversion already knows * about it, so use open_* instead of add_*. */ if ((sts->flags & (RF_ADD | RF_COPY_BASE) ) || @@ -695,7 +715,7 @@ else { status_svn= - (sts->entry_type == FT_DIR ? + (S_ISDIR(sts->updated_mode) ? editor->open_directory : editor->open_file) ( utf8_filename, dir_baton, current_url->current_rev, @@ -710,7 +730,7 @@ * Now these directories need not exist in URL2 - we create them on * demand. * */ - if (sts->entry_type == FT_DIR && + if (S_ISDIR(sts->st.mode) && ( status_svn->apr_err == SVN_ERR_FS_PATH_SYNTAX || status_svn->apr_err == SVN_ERR_FS_NOT_DIRECTORY ) && @@ -723,7 +743,7 @@ } else STOPIF_CODE_ERR(1, status_svn->apr_err, - sts->entry_type == FT_DIR ? + S_ISDIR(sts->st.mode) ? "open_directory" : "open_file"); } @@ -762,16 +782,18 @@ filename, src_path, src_rev); /** \name STOPIF_SVNERR_INDIR */ STOPIF_SVNERR_TEXT( - (sts->entry_type == FT_DIR ? + (S_ISDIR(sts->updated_mode) ? editor->add_directory : editor->add_file), - sts->entry_type == FT_DIR ? "add_directory" : "add_file", (utf8_filename, dir_baton, src_path, src_rev, - subpool, &baton) - ); + subpool, &baton), + "%s(\"%s\", source=\"%s\"@%s)", + S_ISDIR(sts->updated_mode) ? "add_directory" : "add_file", + filename, src_path, hlp__rev_to_string(src_rev)); DEBUGP("baton for new %s %p (parent %p)", sts->name, baton, dir_baton); + /* If it's copy base, we need to clean up all flags below; else we * just remove an (ev. set) add-flag. */ if (sts->flags & RF_COPY_BASE) @@ -779,6 +801,7 @@ else { sts->flags &= ~RF_ADD; + sts->entry_status |= FS_NEW | FS_META_CHANGED; } } @@ -787,7 +810,7 @@ committed_entries++; DEBUGP("doing changes, flags=%X", sts->flags); /* Now we have a baton. Do changes. */ - if (sts->entry_type == FT_DIR) + if (S_ISDIR(sts->updated_mode)) { STOPIF_SVNERR( ci__directory, (editor, sts, baton, subpool) ); STOPIF_SVNERR( editor->close_directory, (baton, subpool) ); @@ -798,15 +821,9 @@ STOPIF_SVNERR( editor->close_file, (baton, NULL, subpool) ); } - /* Update data structures. - * This must be done here, as updating a directory will change - * its mtime, link count, ... - * In case a directory had many changed files it's possible that - * the old filename cache is no longer valid, so get it afresh. */ - STOPIF( ops__build_path(&filename, sts), NULL); - STOPIF( hlp__lstat(filename, &(sts->st)), NULL); - if (!sts->url || url__current_has_precedence(sts->url)) + /* Now this paths exists in this URL. */ + if (url__current_has_precedence(sts->url)) { DEBUGP("setting URL of %s", filename); sts->url=current_url; @@ -815,47 +832,56 @@ } - /* If we try to send properties for the root directory, we get "out of - * date" ... even if nothing changed. So don't do that now, until we - * know a way to make that work. + /* When a directory has been committed (with all changes), + * we can drop the check flag. 
+ * If we only do parts of the child list, we must set it, so that we know + * to check for newer entries on the next status. (The directory + * structure must possibly be built in the repository, so we have to do + * each layer, and after a commit we take the current timestamp -- so we + * wouldn't see changes that happened before the partly commit.) */ + if (! (dir->do_this_entry && ops__allowed_by_filter(dir)) ) + dir->flags |= RF_CHECK; + else + dir->flags &= ~RF_CHECK; + + + /* That this entry belongs to this URL has already been set by the + * parent loop. */ + + + /* Given this example: + * $ mkdir -p dir/sub/subsub + * $ touch dir/sub/subsub/file + * $ fsvs ci dir/sub/subsub + * + * Now "sub" gets committed because of its children; as having a + * directory *without* meta-data in the repository is worse than having + * valid data set, we push the meta-data properties for *new* + * directories, and otherwise if they should be done and got something + * changed. */ + /* Regarding the "dir->parent" check: If we try to send properties for + * the root directory, we get "out of date" ... even if nothing changed. + * So don't do that now, until we know a way to make that work. * * Problem case: user creates an empty directory in the repository "svn * mkdir url:///", then sets this directory as base, and we try to commit - * "it's empty, after all". * Needing an update is not nice - but maybe what we'll have to do. */ - if (dir->parent) + if ((dir->do_this_entry && + ops__allowed_by_filter(dir) && + dir->parent && + /* Are there properties to push? */ + (dir->entry_status & (FS_META_CHANGED | FS_PROPERTIES))) || + (dir->entry_status & FS_NEW)) { - /* Do the meta-data and other properties of the current directory. */ - // TODO if (sts->do_tree) + STOPIF_SVNERR( ci___set_props, + (dir_baton, dir, editor->change_dir_prop, pool) ); + STOPIF( ci___send_user_props(dir_baton, dir, editor->change_dir_prop, NULL, pool), NULL); - - if (dir->entry_status & (FS_META_CHANGED | FS_NEW)) - STOPIF_SVNERR( ci___set_props, - (dir_baton, dir, editor->change_dir_prop, pool) ); - - if (!dir->url || url__current_has_precedence(dir->url)) - { - DEBUGP("setting URL"); - dir->url=current_url; - dir->repos_rev=SET_REVNUM; - } } - /* When a directory has been committed (with all changes), - * we can drop the check flag. - * If we only do parts of the child list, we must set it, so that we know - * to check for newer entries on the next status. (The directory - * structure must possibly be built in the repository, so we have to do - * each layer, and after a commit we take the current timestamp -- so we - * wouldn't see changes that happened before the partly commit.) */ - if (dir->do_this_entry) - dir->flags &= ~RF_CHECK; - else - dir->flags |= RF_CHECK; - - ex: if (subpool) apr_pool_destroy(subpool); @@ -870,19 +896,15 @@ { char *editor_cmd, *cp; int l,status; - int fh; + apr_file_t *af; status=0; - *filename=strdup("/tmp/commit-tmp.XXXXXX"); - fh=mkstemp(*filename); - STOPIF_CODE_ERR(fh == -1, errno, - "mkstemp(%s)", *filename); + STOPIF( waa__get_tmp_name( NULL, filename, &af, global_pool), NULL); /* we close the file, as an editor might delete the file and * write a new. 
*/ - STOPIF_CODE_ERR( close(fh) == -1, - errno, "close commit message file"); + STOPIF( apr_file_close(af), "close commit message file"); editor_cmd=getenv("EDITOR"); if (!editor_cmd) editor_cmd=getenv("VISUAL"); @@ -941,7 +963,10 @@ edit_baton=NULL; editor=NULL; - if (!opt_checksum) opt_checksum++; + opt__set_int(OPT__CHANGECHECK, + PRIO_MUSTHAVE, + opt__get_int(OPT__CHANGECHECK) | CHCHECK_DIRS | CHCHECK_FILE); + STOPIF( waa__find_common_base(argc, argv, &normalized), NULL); @@ -983,9 +1008,10 @@ /* warn/break if file is empty ?? */ - STOPIF( url__open_session(&session), NULL); + STOPIF( url__open_session(NULL), NULL); + only_check_status=2; /* This is the first step that needs some wall time - descending * through the directories, reading inodes */ STOPIF( waa__read_or_build_tree(root, argc, normalized, argv, @@ -999,10 +1025,9 @@ if (st.st_size == 0) { + /* We're not using some mapped memory. */ DEBUGP("empty file"); opt_commitmsg="(none)"; - /* We're not using some mapped memory */ - opt_commitmsgfile=NULL; } else { @@ -1023,7 +1048,7 @@ printf("Committing to %s\n", current_url->url); STOPIF_SVNERR( svn_ra_get_commit_editor, - (session, + (current_url->session, &editor, &edit_baton, utf8_commit_msg, @@ -1033,7 +1058,7 @@ FALSE, // svn_boolean_t keep_locks, global_pool) ); - if (opt_commitmsgfile) + if (opt_commitmsgfile && st.st_size != 0) STOPIF_CODE_ERR( munmap(opt_commitmsg, st.st_size) == -1, errno, "munmap()"); if (commitmsg_is_temp) @@ -1045,6 +1070,10 @@ STOPIF_SVNERR( editor->open_root, (edit_baton, current_url->current_rev, global_pool, &root_baton) ); + /* Only children are updated, not the root. Do that here. */ + if (ops__allowed_by_filter(root)) + STOPIF( hlp__lstat( root->name, &root->st), NULL); + committed_entries=0; /* This is the second step that takes time. */ STOPIF_SVNERR( ci__directory, diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/cp_mv.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/cp_mv.c --- fsvs-1.1.14/src/cp_mv.c 2008-03-25 06:20:11.000000000 +0000 +++ fsvs-1.1.17/src/cp_mv.c 2008-10-25 12:11:20.000000000 +0100 @@ -46,9 +46,9 @@ * fsvs cp load * \endcode * - * This command marks \c DEST as a copy of \c SRC at revision \c rev, so - * that on the next commit of \c DEST the corresponding source path is sent - * as copy source. + * The \c copy command marks \c DEST as a copy of \c SRC at revision \c + * rev, so that on the next commit of \c DEST the corresponding source path + * is sent as copy source. * * The default value for \c rev is \c BASE, ie. the revision the \c SRC * (locally) is at. @@ -196,6 +196,36 @@ * only the indicator ... is shown at the end. * * */ + +/** + * \addtogroup cmds + * + * \section uncp + * + * \code + * fsvs uncopy DEST [DEST ...] + * \endcode + * + * The \c uncopy command removes a \c copyfrom mark from the destination + * entry. This will make the entry unknown again, and reported as \c New on + * the next invocations. + * + * Only the base of a copy can be un-copied; if a directory structure was + * copied, and the given entry is just implicitly copied, this command will + * give you an error. + * + * This is not folded in \ref revert, because it's not clear whether \c + * revert should restore the original copyfrom data or remove the copy + * attribute; by using a special command this is no longer ambiguous. + * + * Example: + * \code + * $ fsvs copy SourceFile DestFile + * # Whoops, was wrong! + * $ fsvs uncopy DestFile + * \endcode + * */ + /* Or should for dirlist just the raw data be showed - common_files, * files_in_new_dir? 
*/ @@ -368,10 +398,12 @@ struct cm___match_t { /** Name for this way of matching */ char name[8]; - /** Which entry types are allowed? */ - int entry_types; + /** Which entry type is allowed? */ + mode_t entry_type; /** Whether this can be avoided by an option. */ - int is_expensive; + int is_expensive:1; + /** Whether this match is allowed. */ + int is_enabled:1; /** Callback function for inserting elements */ cm___register_fn *insert; @@ -410,22 +442,25 @@ { [CM___NAME_F] = { .name="name", .to_key=cm___name_datum, .insert=cm___hash_register, .get_list=cm___hash_list, - .entry_types=FT_FILE, .filename=WAA__FILE_NAME_EXT}, + .entry_type=S_IFREG, .filename=WAA__FILE_NAME_EXT}, [CM___NAME_D] = { .name="name", .to_key=cm___name_datum, .insert=cm___hash_register, .get_list=cm___hash_list, - .entry_types=FT_DIR, .filename=WAA__DIR_NAME_EXT}, + .entry_type=S_IFDIR, .filename=WAA__DIR_NAME_EXT}, + [CM___DIRLIST] = { .name="dirlist", .get_list=cm___match_children, .format=cm___output_pct, - .entry_types=FT_DIR, }, + .entry_type=S_IFDIR, }, + { .name="md5", .to_key=cm___md5_datum, .is_expensive=1, .insert=cm___hash_register, .get_list=cm___hash_list, - .entry_types=FT_FILE, .filename=WAA__FILE_MD5s_EXT}, + .entry_type=S_IFREG, .filename=WAA__FILE_MD5s_EXT}, + { .name="inode", .to_key=cm___inode_datum, .insert=cm___hash_register, .get_list=cm___hash_list, - .entry_types=FT_FILE, .filename=WAA__FILE_INODE_EXT}, + .entry_type=S_IFDIR, .filename=WAA__FILE_INODE_EXT}, { .name="inode", .to_key=cm___inode_datum, .insert=cm___hash_register, .get_list=cm___hash_list, - .entry_types=FT_DIR, .filename=WAA__DIR_INODE_EXT}, + .entry_type=S_IFREG, .filename=WAA__DIR_INODE_EXT}, }; #define CM___MATCH_NUM (sizeof(cm___match_array)/sizeof(cm___match_array[0])) @@ -517,7 +552,7 @@ struct cm___candidate_t *cur, tmp_cand={0}; size_t simil_dir_count; int common; - struct estat **children; + struct estat **children, *curr; struct estat **others, *other_dir; int other_count, i; datum key; @@ -535,16 +570,17 @@ children=sts->by_inode; while (*children) { + curr=*children; /* Find entries with the same name. Depending on the type of the entry * we have to look in one of the two hashes. */ - if ((*children)->entry_type == FT_DIR) + if (S_ISDIR(curr->updated_mode)) name_match=cm___match_array+CM___NAME_D; - else if ((*children)->entry_type == FT_FILE) + else if (S_ISREG(curr->updated_mode)) name_match=cm___match_array+CM___NAME_F; else goto next_child; - key=(name_match->to_key)(*children); + key=(name_match->to_key)(curr); status=hsh__list_get(name_match->db, key, &key, &others, &other_count); @@ -662,8 +698,10 @@ for(i=0; ientry_type & match->entry_types) && - match->insert) + /* We need the original value (st.mode). estat::updated_mode would be + * 0 for a deleted node. */ + if (match->is_enabled && match->insert && + (sts->st.mode & S_IFMT) == match->entry_type ) { STOPIF( (match->insert)(sts, match), NULL); DEBUGP("inserted %s for %s", sts->name, match->name); @@ -688,7 +726,6 @@ struct cm___candidate_t candidates[HASH__LIST_MAX*CM___MATCH_NUM]; struct cm___candidate_t *cur, *list; size_t candidate_count; - int output_error; FILE *output=stdout; @@ -700,7 +737,6 @@ formatted=NULL; status=0; - output_error=0; candidate_count=0; overflows=0; path=NULL; @@ -713,7 +749,7 @@ match=cm___match_array+i; /* Avoid false positives. */ - if (!(entry->entry_type & match->entry_types)) + if ((entry->updated_mode & S_IFMT) != match->entry_type) continue; /* \todo Loop if too many for a single call. 
*/ @@ -765,7 +801,7 @@ STOPIF( hlp__format_path(entry, path, &formatted), NULL); /* Print header line for this file. */ - output_error |= fprintf(output, "%s\n", formatted); + STOPIF_CODE_EPIPE( fprintf(output, "%s\n", formatted), NULL); /* Output list of matches */ for(j=0; jname, output); + STOPIF_CODE_EPIPE( fputs(match->name, output), NULL); if (opt_verbose && match->format) - output_error |= - fputs( match->format(match, candidates+j), output); + STOPIF_CODE_EPIPE( + fputs( match->format(match, candidates+j), output), + NULL); } } STOPIF( ops__build_path(&path, sts), NULL); STOPIF( hlp__format_path(sts, path, &formatted), NULL); - output_error |= fprintf(output, ":%s\n", formatted); + STOPIF_CODE_EPIPE( fprintf(output, ":%s\n", formatted), NULL); } if (overflows) - output_error |= fputs(" ...\n", output); + STOPIF_CODE_EPIPE( fputs(" ...\n", output), NULL); } else { @@ -809,17 +846,15 @@ if (opt_verbose>0) { STOPIF( hlp__format_path(entry, path, &formatted), NULL); - output_error |= fprintf(output, "- No copyfrom relation found for %s\n", formatted); + STOPIF_CODE_EPIPE( fprintf(output, + "- No copyfrom relation found for %s\n", + formatted), NULL); } else DEBUGP("No sources found for %s", path); } - output_error |= fflush(output); - - /* Could be something else ... but we can't write data, so we quit. */ - if (output_error<0) - status=EPIPE; + STOPIF_CODE_EPIPE( fflush(output), NULL); ex: return status; @@ -882,22 +917,22 @@ * we could maybe save some searching for all children.... */ if (sts->entry_status & FS_NEW) { - switch(sts->entry_type) + switch (sts->updated_mode & S_IFMT) { - case FT_DIR: - STOPIF(cm__find_dir_source(sts), NULL); + case S_IFDIR: + STOPIF( cm__find_dir_source(sts), NULL); break; - case FT_SYMLINK: - case FT_FILE: - STOPIF(cm__find_file_source(sts), NULL); + case S_IFLNK: + case S_IFREG: + STOPIF( cm__find_file_source(sts), NULL); break; default: DEBUGP("Don't handle entry %s", sts->name); } } - if (sts->entry_type == FT_DIR && - (sts->entry_status & FS_CHILD_CHANGED) ) + if (S_ISDIR(sts->updated_mode) && + (sts->entry_status & (FS_CHILD_CHANGED | FS_CHANGED)) ) STOPIF( cm__find_copied(sts), NULL); child++; @@ -922,9 +957,8 @@ /* Operate recursively. */ opt_recursive++; /* But do not allow to get current MD5s - we need the data from the - * repository. - * TODO? */ - opt_checksum=0; + * repository. */ + opt__set_int(OPT__CHANGECHECK, PRIO_MUSTHAVE, CHCHECK_NONE); STOPIF( waa__find_common_base(argc, argv, &normalized), NULL); @@ -941,8 +975,8 @@ { match=cm___match_array+i; - if (match->is_expensive && !opt__get_int(OPT__COPYFROM_EXP)) - match->entry_types=0; + match->is_enabled= !match->is_expensive || + opt__get_int(OPT__COPYFROM_EXP); if (!match->filename[0]) continue; @@ -967,10 +1001,10 @@ STOPIF( cm__find_copied(root), NULL); if (!copydetect_count) - printf("No copyfrom relations found.\n"); + STOPIF_CODE_EPIPE( printf("No copyfrom relations found.\n"), NULL); else if (opt_verbose>0) - printf("%d copyfrom relation%s found.\n", - copydetect_count, copydetect_count == 1 ? "" : "s"); + STOPIF_CODE_EPIPE( printf("%d copyfrom relation%s found.\n", + copydetect_count, copydetect_count == 1 ? 
"" : "s"), NULL); ex: for(i=0; iflags &= ~RF_COPY_SUB; + + if (cur->flags & (RF_ADD | RF_PUSHPROPS)) + all_ign=0; + + if (S_ISDIR(cur->updated_mode) && cur->entry_count) + { + sts=cur->by_inode; + while (*sts) + { + all_ign &= cm___ignore_impl_copied(*sts); + sts++; + } + } + + if (all_ign) + cur->to_be_ignored=1; + DEBUGP("%s: all_ignore=%d", cur->name, all_ign); + + return all_ign; +} + + +/** -. + * */ +int cm__uncopy(struct estat *root, int argc, char *argv[]) +{ + int status; + char **normalized; + struct estat *dest; + + + /* Do only the selected elements. */ + opt_recursive=-1; + + if (!argc) + ac__Usage_this(); + + STOPIF( waa__find_common_base(argc, argv, &normalized), NULL); + + STOPIF( url__load_nonempty_list(NULL, 0), NULL); + + only_check_status=1; + /* Load the current data, without updating */ + status=waa__input_tree(root, NULL, NULL); + if (status == ENOENT) + STOPIF( EINVAL, "!No working copy could be found."); + else + STOPIF( status, NULL); + + while (*normalized) + { + DEBUGP("uncopy %s %s", *normalized, normalized[1]); + + STOPIF( ops__traverse(root, *normalized, + OPS__FAIL_NOT_LIST, 0, + &dest), + "!The entry \"%s\" is not known.", *normalized); + STOPIF_CODE_ERR( !(dest->flags & RF_COPY_BASE), EINVAL, + "!The entry \"%s\" is not a copy base.", *normalized); + + /* Directly copied, unchanged entry. + * Make it unknown - remove copy relation (ie. mark hash value for + * deletion), and remove entry from local list. */ + STOPIF( cm__get_source(dest, NULL, NULL, NULL, 1), NULL); + + dest->flags &= ~RF_COPY_BASE; + + /* That removes all not explicitly added entries from this subtree. */ + cm___ignore_impl_copied(dest); + + normalized++; + } + + STOPIF( waa__output_tree(root), NULL); + /* Purge. */ + STOPIF( cm__get_source(NULL, NULL, NULL, NULL, 0), NULL); + +ex: + return status; +} + + /** -. * */ int cm__work(struct estat *root, int argc, char *argv[]) @@ -1332,8 +1453,9 @@ /* Load the current data, without updating */ status=waa__input_tree(root, NULL, NULL); - if (status == ENOENT) - STOPIF(status, "!No data about current entries is available."); + if (status == -ENOENT) + STOPIF(status, "!No entries are currently known, " + "so you can't define copy or move relations yet.\n"); STOPIF(status, NULL); if (is_load) diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/cp_mv.h /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/cp_mv.h --- fsvs-1.1.14/src/cp_mv.h 2008-02-19 05:57:01.000000000 +0000 +++ fsvs-1.1.17/src/cp_mv.h 2008-06-20 06:02:24.000000000 +0100 @@ -19,9 +19,9 @@ work_t cm__work; /** For automatically finding relations. */ work_t cm__detect; +/** For removing copyfrom relations. */ +work_t cm__uncopy; -/** Worker function. */ -action_t cm__action; /** Returns the source of the given entry. */ int cm__get_source(struct estat *sts, char *name, diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/dev/permutate-all-tests /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/dev/permutate-all-tests --- fsvs-1.1.14/src/dev/permutate-all-tests 2008-02-22 19:01:35.000000000 +0000 +++ fsvs-1.1.17/src/dev/permutate-all-tests 2008-10-29 07:18:25.000000000 +0000 @@ -16,6 +16,7 @@ # published by the Free Software Foundation. ########################################################################## +use Fcntl qw(FD_CLOEXEC F_SETFD F_GETFD); # ############################################################################# @@ -85,9 +86,10 @@ for $release ("", "--enable-release", "--enable-debug") { # make sure that the binary gets recompiled - $conf_cmd="( cd .. 
&& ./configure $release ) && touch config.h && make"; - system("( $conf_cmd ) > /tmp/fsvs-conf.txt 2>&1") && - die "configure problem: $?"; + $conf_cmd="( cd .. && ./configure $release ) && ". + "touch config.h && make -j$parallel"; + system("( $conf_cmd ) > /tmp/fsvs-conf.txt 2>&1") && + die "configure problem: $?"; # Start the slow, uncommon tasks first. for $prot ("svn+ssh", "file://") @@ -170,7 +172,20 @@ # $x=(0.5 < rand())+0; print "$$: exit with $x\n"; exit($x); # this is the child ... - $parms="LC_ALL=$lang TESTBASEx=$PTESTBASE2 PROTOCOL=$prot RANDOM_ORDER=1"; + pipe(FAILREAD, FAILWRITE) || die "pipe: $!"; + +# sudo closes the filehandles above 2, and I found no way to get it to +# keep them open. +# So we have to give a path name to the children. + $tl=$ENV{"TEST_LIST"}; + $parms="LC_ALL=$lang" . + " 'TESTBASEx=$PTESTBASE2'" . + " 'PROTOCOL=$prot'" . + " RANDOM_ORDER=1" . + ($tl ? " 'TEST_LIST=$tl'" : "") . + " TEST_FAIL_WRITE_HDL=/proc/$$/fd/".fileno(FAILWRITE) . +# And it can have our STDERR. + " TEST_TTY_HDL=/proc/$$/fd/2"; # To avoid getting N*N running tasks for a "-j N", we explicitly say 1. # Parallel execution within the tests is not done yet, but better safe @@ -194,18 +209,34 @@ die $! unless defined($pid); if (!$pid) { + close FAILREAD; + $ENV{"MAKEFLAGS"}=""; + open(STDIN, "< /dev/null") || die $!; open(STDOUT, ">&LOG") || die $!; open(STDERR, ">&LOG") || die $!; + + $x=fcntl(FAILWRITE, F_GETFD, 0); + fcntl(FAILWRITE, F_SETFD, $x & ~FD_CLOEXEC); + # sudo removes some environment variables, so set all options via make. exec $cmd; die; } + # Give the child some time to take the write side. + # If we ever get more than 4/64 kB of failed tests this will hang. die $! if waitpid($pid, 0) == -1; $error=$?; + # We have to close the write side of the pipe, so that on reading we'll + # see an EOF. + close FAILWRITE; + @failed=map { chomp; $_; } ; + close FAILREAD; + + $end=time(); $t=EndText($start, $end); @@ -226,12 +257,15 @@ print LOG "\n", "$t\n", "$status $error: $user $parms\n", + "got failed as (", join(" ", @failed), ")\n", "\n", "$conf_cmd && $cmd\n"; close LOG; $u = $user || "user"; - print CSV "$sum,'$prot','$lang','$u','$release','$status'\n"; + print CSV join(",", $sum, map { "'$_'"; } + ($prot, $lang, $u, $release, $status, sort(@failed))), + "\n"; close CSV; # We cannot return $error directly ... only the low 8bit would @@ -270,5 +304,5 @@ { my($start, $end)=@_; return "Finished after ". ($end - $start) . " seconds (" . - localtime($end) . ").\n"; + localtime($end) . ")."; } diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/diff.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/diff.c --- fsvs-1.1.14/src/diff.c 2008-03-17 06:09:14.000000000 +0000 +++ fsvs-1.1.17/src/diff.c 2008-10-25 12:16:03.000000000 +0100 @@ -8,6 +8,7 @@ #include #include +#include #include #include @@ -17,6 +18,7 @@ #include "helper.h" #include "interface.h" #include "url.h" +#include "status.h" #include "options.h" #include "est_ops.h" #include "ignore.h" @@ -24,6 +26,7 @@ #include "racallback.h" #include "cp_mv.h" #include "warnings.h" +#include "diff.h" /** \file @@ -68,7 +71,13 @@ * For entries marked as copy the diff against the (clean) source entry is * printed. * - * Please see also \ref o_diff and \ref o_colordiff. */ + * Please see also \ref o_diff and \ref o_colordiff. + * + * \todo Two revisions diff is buggy in that it (currently) always fetches + * the full tree from the repository; this is not only a performance + * degradation, but you'll see more changed entries than you want. This + * will be fixed. 
+ * */ int cdiff_pipe=STDOUT_FILENO; @@ -121,16 +130,12 @@ l1>=META_DIFF_MAXLEN || l2>=META_DIFF_MAXLEN, EINVAL, "Printing meta-data strings format error"); - if (l1 != l2 || strcmp(buf_new, buf_old) !=0) /* Different */ - status=printf("-%s\n+%s\n", buf_old, buf_new); - else - status=printf(" %s\n", buf_old); - - STOPIF_CODE_ERR(status<0, errno, - "Meta-data diff output error"); - - status=0; + STOPIF_CODE_EPIPE( + printf( + (l1 != l2 || strcmp(buf_new, buf_old) !=0) ? + "-%s\n+%s\n" : " %s\n", + buf_old, buf_new), NULL); ex: return status; @@ -145,9 +150,14 @@ * * If the user specified only a single revision (rev2 == 0), * the local file is diffed against this; else against the - * other repository version. */ + * other repository version. + * + * \a rev2_file is meaningful only if \a rev2 is 0; this file gets removed + * after printing the difference! + * */ int df__do_diff(struct estat *sts, - svn_revnum_t rev1, svn_revnum_t rev2) + svn_revnum_t rev1, + svn_revnum_t rev2, char *rev2_file) { int status; int ch_stat; @@ -158,13 +168,13 @@ char *path, *disp_dest, *disp_source; int len_d, len_s; char *b1, *b2; - struct sstat_t new_stat; - svn_revnum_t from, to; + struct estat sts_r2; char short_desc[10]; char *new_mtime_string, *other_mtime_string; - char *url_to_fetch; + char *url_to_fetch, *other_url; int is_copy; int fdflags; + apr_hash_t *props_r1, *props_r2; status=0; @@ -181,7 +191,7 @@ tmp_pid, WEXITSTATUS(ch_stat), ch_stat); STOPIF_CODE_ERR( !WIFEXITED(ch_stat), EIO, - "Child %d terminated abnormally", last_child); + "!Child %d terminated abnormally", tmp_pid); if (WEXITSTATUS(ch_stat) == 1) DEBUGP("exit code 1 - file has changed."); @@ -218,6 +228,9 @@ if (!sts) goto ex; + STOPIF( ops__build_path( &path, sts), NULL); + + url_to_fetch=NULL; /* If this entry is freshly copied, get it's source URL. */ is_copy=sts->flags & RF___IS_COPY; @@ -229,6 +242,8 @@ * soon as we allow "fsvs cp URL path". */ STOPIF( url__find(url_to_fetch, &sts->url), NULL); } + else + url_to_fetch=path+2; current_url = sts->url; @@ -242,20 +257,36 @@ * we can print both. */ /* \e From is always the "old" - base revision, or first given revision. * \e To is the newer version - 2nd revision, or local file. */ + /* TODO: use delta transfers for 2nd file. */ + sts_r2=*sts; if (rev2 != 0) { - STOPIF( rev__get_file(sts, rev2, NULL, - &to, &last_tmp_file2, - global_pool), + STOPIF( url__full_url(sts, NULL, &other_url), NULL); + + STOPIF( url__canonical_rev(current_url, &rev2), NULL); + STOPIF( rev__get_text_to_tmpfile(other_url, rev2, DECODER_UNKNOWN, + NULL, &last_tmp_file2, + NULL, &sts_r2, &props_r2, + current_url->pool), NULL); } - new_stat=sts->st; + else if (rev2_file) + { + DEBUGP("diff against %s", rev2_file); + /* Let it get removed. */ + last_tmp_file2=rev2_file; + } /* Now fetch the \e old version. */ - STOPIF( rev__get_file(sts, rev1, url_to_fetch, - &from, &last_tmp_file, - global_pool), - NULL); + STOPIF( url__canonical_rev(current_url, &rev1), NULL); + STOPIF( rev__get_text_to_tmpfile(url_to_fetch, rev1, DECODER_UNKNOWN, + NULL, &last_tmp_file, + NULL, sts, &props_r1, + current_url->pool), NULL); + + /* If we didn't flush the stdio buffers here, we'd risk getting them + * printed a second time from the child. */ + fflush(NULL); last_child=fork(); @@ -264,9 +295,11 @@ if (!last_child) { - STOPIF( ops__build_path( &path, sts), NULL); STOPIF( hlp__format_path(sts, path, &disp_dest), NULL); + /* Remove the ./ at the front */ + setenv(FSVS_EXP_CURR_ENTRY, path+2, 1); + disp_source= is_copy ? 
url_to_fetch : disp_dest; len_d=strlen(disp_dest); @@ -286,21 +319,19 @@ } - /* Checking \b which return value we get is unnecessary ... - * On \b every error we get \c -1 .*/ /* We need not be nice with memory usage - we'll be replaced soon. */ /* 30 chars should be enough for everyone */ b1=malloc(len_s + 60 + 30); b2=malloc(len_d + 60 + 30); - new_mtime_string=strdup(ctime(&new_stat.mtim.tv_sec )); + new_mtime_string=strdup(ctime(& sts_r2.st.mtim.tv_sec )); STOPIF_ENOMEM(!new_mtime_string); other_mtime_string=strdup(ctime(&sts->st.mtim.tv_sec )); STOPIF_ENOMEM(!other_mtime_string); sprintf(b1, "%s \tRev. %llu \t(%-24.24s)", - disp_source, (t_ull) from, other_mtime_string); + disp_source, (t_ull) rev1, other_mtime_string); if (rev2 == 0) { @@ -311,24 +342,24 @@ else { sprintf(b2, "%s \tRev. %llu \t(%-24.24s)", - disp_dest, (t_ull) to, new_mtime_string); - sprintf(short_desc, "r%llu", (t_ull) to); + disp_dest, (t_ull) rev2, new_mtime_string); + sprintf(short_desc, "r%llu", (t_ull) rev2); } /* Print header line, just like a recursive diff does. */ - STOPIF_CODE_ERR( printf("diff -u %s.r%llu %s.%s\n", - disp_source, (t_ull)from, - disp_dest, short_desc) < 0, errno, + STOPIF_CODE_EPIPE( printf("diff -u %s.r%llu %s.%s\n", + disp_source, (t_ull)rev1, + disp_dest, short_desc), "Diff header"); - if (opt_verbose>0) + if (opt_verbose>0) // TODO: && !symlink ...) { STOPIF( df___print_meta( "Mode: 0%03o", sts->st.mode & 07777, META_DIFF_DELIMITER, - new_stat.mode & 07777), + sts_r2.st.mode & 07777), NULL); STOPIF( df___print_meta( "MTime: %.24s", other_mtime_string, @@ -338,26 +369,31 @@ STOPIF( df___print_meta( "Owner: %d (%s)", sts->st.uid, hlp__get_uname(sts->st.uid, "undefined"), META_DIFF_DELIMITER, - new_stat.uid, hlp__get_uname(new_stat.uid, "undefined") ), + sts_r2.st.uid, hlp__get_uname(sts_r2.st.uid, "undefined") ), NULL); STOPIF( df___print_meta( "Group: %d (%s)", sts->st.gid, hlp__get_grname(sts->st.gid, "undefined"), META_DIFF_DELIMITER, - new_stat.gid, hlp__get_grname(new_stat.gid, "undefined") ), + sts_r2.st.gid, hlp__get_grname(sts_r2.st.gid, "undefined") ), NULL); } fflush(NULL); - - STOPIF_CODE_ERR( execlp( opt__get_string(OPT__DIFF_PRG), - opt__get_string(OPT__DIFF_PRG), - opt__get_string(OPT__DIFF_OPT), - last_tmp_file, - "--label", b1, - (rev2 == 0 ? path : last_tmp_file2), - "--label", b2, - opt__get_string(OPT__DIFF_EXTRA), - NULL) == -1, errno, + // TODO: if special_dev ... + + /* Checking \b which return value we get is unnecessary ... On \b + * every error we get \c -1 .*/ + execlp( opt__get_string(OPT__DIFF_PRG), + opt__get_string(OPT__DIFF_PRG), + opt__get_string(OPT__DIFF_OPT), + last_tmp_file, + "--label", b1, + (rev2 != 0 ? last_tmp_file2 : + rev2_file ? rev2_file : path), + "--label", b2, + opt__get_string(OPT__DIFF_EXTRA), + NULL); + STOPIF_CODE_ERR( 1, errno, "Starting the diff program \"%s\" failed", opt__get_string(OPT__DIFF_PRG)); } @@ -392,7 +428,7 @@ cdiff_pid, WEXITSTATUS(ret), ret); } - STOPIF( df__do_diff(NULL, 0, 0), NULL); + STOPIF( df__do_diff(NULL, 0, 0, 0), NULL); ex: return status; @@ -413,47 +449,88 @@ } +/** Does a diff of the local non-directory against the given revision. 
+ * */ +int df___type_def_diff(struct estat *sts, svn_revnum_t rev, + apr_pool_t *pool) +{ + int status; + char *special_stg, *fn; + apr_file_t *apr_f; + apr_size_t wr_len, exp_len; + + + status=0; + special_stg=NULL; + switch (sts->updated_mode & S_IFMT) + { + case S_IFREG: + STOPIF( df__do_diff(sts, rev, 0, NULL), NULL); + break; + + case S_IFCHR: + case S_IFBLK: + special_stg=ops__dev_to_filedata(sts); + + /* Fallthrough, ignore first statement. */ + + case S_IFLNK: + if (!special_stg) + STOPIF( ops__link_to_string(sts, NULL, &special_stg), NULL); + + STOPIF( ops__build_path( &fn, sts), NULL); + STOPIF_CODE_EPIPE( printf("Special entry changed: %s\n", fn), NULL); + + /* As "diff" cannot handle special files directly, we have to + * write the expected string into a file, and diff against + * that. + * The remote version is fetched into a temporary file anyway. */ + STOPIF( waa__get_tmp_name(NULL, &fn, &apr_f, pool), NULL); + + wr_len=exp_len=strlen(special_stg); + STOPIF( apr_file_write(apr_f, special_stg, &wr_len), NULL); + STOPIF_CODE_ERR( wr_len != exp_len, ENOSPC, NULL); + + STOPIF( apr_file_close(apr_f), NULL); + + + STOPIF( df__do_diff(sts, rev, 0, fn), NULL); + break; + + default: + BUG("type?"); + } + +ex: + return status; +} + + /** -. */ -int df__action(struct estat *sts, char *fn) +int df___direct_diff(struct estat *sts) { int status; svn_revnum_t rev1, rev2; + char *fn; + STOPIF( ops__build_path( &fn, sts), NULL); + status=0; - if (sts->entry_type & FT_NONDIR) + if (!S_ISDIR(sts->updated_mode)) { DEBUGP("doing %s", fn); - /* Find correct revision */ - switch (opt_target_revisions_given) + /* Has to be set per sts. */ + rev1=sts->repos_rev; + rev2=0; + if ( (sts->entry_status & FS_REMOVED)) { - case 0: - /* Has to be set per sts. */ - rev1=sts->repos_rev; - rev2=0; - if ( (sts->entry_status & FS_REMOVED)) - { - printf("Only in repository: %s\n", fn); - goto ex; - } - break; - case 1: - rev1=opt_target_revision; - rev2=0; - break; - case 2: - rev1=opt_target_revision; - rev2=opt_target_revision2; - break; - default: - BUG("too many revisions"); - /* To avoid "used uninitialized" */ - goto ex; + STOPIF_CODE_EPIPE( printf("Only in repository: %s\n", fn), NULL); + goto ex; } - if (sts->entry_type & FT_IGNORE) - goto ex; + if (sts->to_be_ignored) goto ex; if ( (sts->entry_status & FS_NEW) || !sts->url) { @@ -464,7 +541,8 @@ else { if (opt_verbose>0) - printf("Only in local filesystem: %s\n", fn); + STOPIF_CODE_EPIPE( printf("Only in local filesystem: %s\n", + fn), NULL); goto ex; } } @@ -472,17 +550,16 @@ /* Local files must have changed; for repos-only diffs do always. */ if (sts->entry_status || opt_target_revisions_given) { - DEBUGP("doing diff rev1=%llu rev2=%llu", - (t_ull)rev1, (t_ull)rev2); - STOPIF( df__do_diff(sts, rev1, rev2), NULL); - - - /* If we don't wait for completion here, our temporary files are - * found and reported as new. I don't want to compare strings ... are - * there any better ideas? - * Maybe rev__get_file() should have a parameter telling *where* to - * put that file - and for diff we'd use /tmp. */ - STOPIF( df__do_diff(NULL, 0, 0), NULL); + DEBUGP("doing diff rev1=%llu", (t_ull)rev1); + if (S_ISDIR(sts->updated_mode)) + { + /* TODO: meta-data diff? */ + } + else + { + /* TODO: Some kind of pool handling in recursion. */ + STOPIF( df___type_def_diff(sts, rev1, global_pool), NULL); + } } } else @@ -495,6 +572,32 @@ } +/** A cheap replacement for colordiff. + * Nothing more than a \c cat. 
*/ +int df___cheap_colordiff(void) +{ + int status; + char *tmp; + const int tmp_size=16384; + + status=0; + tmp=alloca(tmp_size); + while ( (status=read(STDIN_FILENO,tmp, tmp_size)) > 0 ) + if ( (status=write(STDOUT_FILENO, tmp, status)) == -1) + break; + + if (status == -1) + { + STOPIF_CODE_ERR(errno != EPIPE, errno, + "Getting or pushing diff data"); + status=0; + } + +ex: + return status; +} + + /** Tries to start colordiff. * If colordiff can not be started, but the option says \c auto, we just * forward the data. Sadly neither \c splice nor \c sendfile are available @@ -502,17 +605,34 @@ * */ int df___colordiff(int *handle, pid_t *cd_pid) { - static const char program[]="colordiff"; + const char *program; int status; - char *tmp; - const int tmp_size=16384; - int pipes[2], fdflags; - + int pipes[2], fdflags, success[2]; status=0; + program=opt__get_int(OPT__COLORDIFF) ? + opt__get_string(OPT__COLORDIFF) : + "colordiff"; + STOPIF_CODE_ERR( pipe(pipes) == -1, errno, "No more pipes"); + STOPIF_CODE_ERR( pipe(success) == -1, errno, + "No more pipes, case 2"); + /* There's a small problem if the parent gets scheduled before the child, + * and the child doesn't find the colordiff binary; then the parent might + * only find out when it tries to send the first data across the pipe. + * + * But the successfully spawned colordiff won't report success, so the + * parent would have to wait for a fail message - which delays execution + * unnecessary - or simply live with diff getting EPIPE. + * + * Trying to get it scheduled by sending it a signal (which will be + * ignored) doesn't work reliably, too. + * + * The only way I can think of is opening a second pipe in reverse + * direction; if there's nothing to be read but EOF, the program could be + * started - else we get a single byte, signifying an error. */ *cd_pid=fork(); STOPIF_CODE_ERR( *cd_pid == -1, errno, @@ -520,6 +640,13 @@ if (!*cd_pid) { + close(success[0]); + + fdflags=fcntl(success[1], F_GETFD); + fdflags |= FD_CLOEXEC; + fcntl(success[1], F_SETFD, fdflags); + + STOPIF_CODE_ERR( ( dup2(pipes[0], STDIN_FILENO) | close(pipes[1]) | @@ -528,20 +655,41 @@ execlp( program, program, NULL); - if (opt__get_int(OPT__COLORDIFF) == OPT__YES) - STOPIF_CODE_ERR(1, errno, - "Cannot start \"%s\" program", program); + + /* "" as value means best effort, so no error; any other string should + * give an error. */ + if (opt__get_int(OPT__COLORDIFF) != 0) + { + fdflags=errno; + if (!fdflags) fdflags=EINVAL; + + /* Report an error to the parent. */ + write(success[1], &fdflags, sizeof(fdflags)); + + STOPIF_CODE_ERR_GOTO(1, fdflags, quit, + "!Cannot start colordiff program \"%s\"", program); + } + + close(success[1]); /* Well ... do the best. */ - tmp=alloca(tmp_size); - while ( (status=read(STDIN_FILENO,tmp, tmp_size)) > 0 ) - write(STDOUT_FILENO, tmp, status); + /* We cannot use STOPIF() and similar, as that would return back up to + * main - and possibly cause problems somewhere else. */ + status=df___cheap_colordiff(); - STOPIF_CODE_ERR( status == -1, errno, "Cannot get data"); - exit(0); +quit: + exit(status ? 1 : 0); } close(pipes[0]); + close(success[1]); + + status=read(success[0], &fdflags, sizeof(fdflags)); + close(success[0]); + STOPIF_CODE_ERR( status>0, fdflags, + "!The colordiff program \"%s\" doesn't accept any data.\n" + "Maybe it couldn't be started, or stopped unexpectedly?", + opt__get_string(OPT__COLORDIFF) ); /* For svn+ssh connections a ssh process is spawned off. @@ -553,7 +701,6 @@ /* Does this return errors? 
*/ fcntl(pipes[1], F_SETFD, fdflags); - *handle=pipes[1]; DEBUGP("colordiff is %d", *cd_pid); @@ -562,6 +709,156 @@ } +/** Prints diffs for all entries with estat::entry_status or + * estat::remote_status set. */ +int df___diff_wc_remote(struct estat *entry, apr_pool_t *pool) +{ + int status; + struct estat **sts; + int removed; + char *fn, *special_stg; + apr_pool_t *subpool; + + + status=0; + subpool=NULL; + STOPIF( apr_pool_create(&subpool, pool), NULL); + + removed = + ( ((entry->remote_status & FS_REPLACED) == FS_REMOVED) ? 1 : 0 ) | + ( ((entry->entry_status & FS_REPLACED) == FS_REMOVED) ? 2 : 0 ); + + STOPIF( ops__build_path(&fn, entry), NULL); + DEBUGP("%s: removed=%X loc=%s rem=%s", fn, removed, + st__status_string_fromint(entry->entry_status), + st__status_string_fromint(entry->remote_status)); + + /* TODO: option to print the whole lot of removed and "new" lines for + * files existing only at one point? */ + switch (removed) + { + case 3: + /* Removed both locally and remote; no change to print. (?) */ + break; + + case 1: + /* Remotely removed. */ + STOPIF_CODE_EPIPE( printf("Only locally: %s\n", fn), NULL); + break; + + case 2: + /* Locally removed. */ + STOPIF_CODE_EPIPE( printf("Only in the repository: %s\n", fn), NULL); + break; + + case 0: + /* Exists on both; do recursive diff. */ + + if (entry->entry_status || entry->remote_status) + { + special_stg=NULL; + + if (S_ISDIR(entry->updated_mode)) + { + /* TODO: meta-data diff? */ + if (entry->entry_count) + { + sts=entry->by_inode; + while (*sts) + { + STOPIF( df___diff_wc_remote(*sts, subpool), NULL); + sts++; + } + } + } + else + STOPIF( df___type_def_diff(entry, entry->repos_rev, subpool), NULL); + } + + break; + } + +ex: + /* This is of type (void), so we don't have any status to check. */ + if (subpool) apr_pool_destroy(subpool); + + return status; +} + + +/** Set the entry as BASE (has no changes). */ +int df___reset_remote_st(struct estat *sts) +{ + sts->remote_status=0; + return 0; +} + + +/** Does a repos/repos diff. + * Currently works only for files. */ +int df___repos_repos(struct estat *sts) +{ + int status; + char *fullpath, *path; + struct estat **children; + + + STOPIF( ops__build_path( &fullpath, sts), NULL); + DEBUGP("%s: %s", fullpath, st__status_string_fromint(sts->remote_status)); + + STOPIF( hlp__format_path( sts, fullpath, &path), NULL); + + if ((sts->remote_status & FS_REPLACED) == FS_REPLACED) + STOPIF_CODE_EPIPE( + printf("Completely replaced: %s\n", path), NULL); + else if (sts->remote_status & FS_NEW) + STOPIF_CODE_EPIPE( + printf("Only in r%llu: %s\n", + (t_ull)opt_target_revision2, path), NULL); + else if ((sts->remote_status & FS_REPLACED) == FS_REMOVED) + STOPIF_CODE_EPIPE( + printf("Only in r%llu: %s\n", + (t_ull)opt_target_revision, path), NULL); + else if (sts->remote_status) + switch (sts->st.mode & S_IFMT) + { + case S_IFDIR: + /* TODO: meta-data diff? */ + if (sts->entry_count) + { + children=sts->by_inode; + while (*children) + STOPIF( df___repos_repos(*(children++)), NULL); + } + + break; + + /* Normally a repos-repos diff can only show symlinks changing - + * all other types of special entries get *replaced*. */ + case S_IFANYSPECIAL: + /* We don't know yet which special type it is. 
*/ + case S_IFLNK: + case S_IFBLK: + case S_IFCHR: + STOPIF_CODE_EPIPE( printf("Special entry changed: %s\n", + path), NULL); + /* Fallthrough */ + + case S_IFREG: + STOPIF( df__do_diff(sts, + opt_target_revision, opt_target_revision2, NULL), + NULL); + break; + + default: + BUG("type?"); + } + +ex: + return status; +} + + /** -. * * We get the WC status, fetch the named changed entries, and call @@ -572,12 +869,14 @@ int df__work(struct estat *root, int argc, char *argv[]) { int status; + int i, deinit; char **normalized; + svn_revnum_t rev, base; + char *norm_wcroot[2]= {".", NULL}; status=0; - /* Default to non-recursive. */ - opt_recursive--; + deinit=1; STOPIF( waa__find_common_base(argc, argv, &normalized), NULL); @@ -587,9 +886,12 @@ signal(SIGINT, df___signal); signal(SIGTERM, df___signal); signal(SIGHUP, df___signal); + signal(SIGCHLD, SIG_DFL); + /* check for colordiff */ - if (opt__get_int(OPT__COLORDIFF) && + if (( opt__get_int(OPT__COLORDIFF)==0 || + opt__doesnt_say_off(opt__get_string(OPT__COLORDIFF)) ) && (isatty(STDOUT_FILENO) || opt__get_prio(OPT__COLORDIFF) > PRIO_PRE_CMDLINE) ) { @@ -597,16 +899,120 @@ STOPIF( df___colordiff(&cdiff_pipe, &cdiff_pid), NULL); } + /* TODO: If we get "-u X@4 Y@4:3 Z" we'd have to do different kinds of + * diff for the URLs. + * What about filenames? */ + STOPIF( url__mark_todo(), NULL); + + switch (opt_target_revisions_given) + { + case 0: + /* Diff WC against BASE. */ + + action->local_callback=df___direct_diff; + /* We know that we've got a wc base because of + * waa__find_common_base() above. */ + STOPIF( waa__read_or_build_tree(root, argc, + normalized, argv, NULL, 1), NULL); + break; + + case 1: + /* WC against rX. */ + /* Fetch local changes ... */ + action->local_callback=st__progress; + action->local_uninit=st__progress_uninit; + STOPIF( waa__read_or_build_tree(root, argc, normalized, argv, NULL, 1), NULL); + // Has to set FS_CHILD_CHANGED somewhere + + /* Fetch remote changes ... */ + while ( ! ( status=url__iterator(&rev) ) ) + { + STOPIF( cb__record_changes(root, rev, current_url->pool), NULL); + } + STOPIF_CODE_ERR( status != EOF, status, NULL); + + STOPIF( df___diff_wc_remote(root, current_url->pool), NULL); + break; + + case 2: + /* rX:Y. + * This works in a single loop because the URLs are sorted in + * descending priority, and an entry removed at a higher priority + * could be replaced by one at a lower. */ + /* TODO: 2 revisions per-URL. */ + + /* If no entries are given, do the whole working copy. */ + if (!argc) + normalized=norm_wcroot; - status=waa__read_or_build_tree(root, argc, normalized, argv, NULL, 1); - if (status == ENOENT) - STOPIF( status, - "!No WAA information found; you probably didn't commit."); - STOPIF( status, "No working copy base found?"); + while ( ! ( status=url__iterator(&rev) ) ) + { + STOPIF( url__canonical_rev(current_url, &opt_target_revision), NULL); + STOPIF( url__canonical_rev(current_url, &opt_target_revision2), NULL); + + /* Take the values at the first revision as base; say that we've + * got nothing. */ + current_url->current_rev=0; + action->repos_feedback=df___reset_remote_st; + STOPIF( cb__record_changes(root, opt_target_revision, + current_url->pool), NULL); + + /* Now get changes. We cannot do diffs directly, because + * we must not use the same connection for two requests + * simultaneously. */ + action->repos_feedback=NULL; + + /* We say that the WC root is at the target revision, but that some + * paths are not. 
*/ + base=current_url->current_rev; + current_url->current_rev=opt_target_revision2; + STOPIF( cb__record_changes_mixed(root, opt_target_revision2, + normalized, base, current_url->pool), + NULL); + } + STOPIF_CODE_ERR( status != EOF, status, NULL); - STOPIF( df___cleanup(), NULL); + + /* If we'd use the log functions to get a list of changed files + * we'd be slow for large revision ranges; for the various + * svn_ra_do_update, svn_ra_do_diff2 and similar functions we'd + * need the (complete) working copy base to get deltas against (as + * we don't know which entries are changed). + * + * This way seems to be the fastest, and certainly the easiest for + * now. */ + /* "time fsvs diff -r4:4" on "ssh+svn://localhost/..." for 8400 + * files gives a real time of 3.6sec. + * "time fsvs diff > /dev/null" on "ssh+svn://localhost/..." for 840 + * of 8400 files changed takes 1.8sec. + * */ + /* A possible idea would be to have a special delta-editor that + * accepts (not already known) directories as unchanged. + * Then it should be possible [1] to ask for the *needed* parts + * only, which should save a fair bit of bandwidth. + * + * Ad 1: Ignoring "does not exist" messages when we say "directory + * 'not-needed' is already at revision 'target'" and this isn't + * true. TODO: Test whether all ra layers make that possible. */ + + STOPIF( df___repos_repos(root), NULL); + status=0; + break; + default: + BUG("what?"); + } + + STOPIF( df__do_diff(NULL, 0, 0, 0), NULL); ex: + if (deinit) + { + deinit=0; + i=df___cleanup(); + if (!status && i) + STOPIF(i, NULL); + } + return status; } diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/diff.h /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/diff.h --- fsvs-1.1.14/src/diff.h 2008-02-19 05:57:01.000000000 +0000 +++ fsvs-1.1.17/src/diff.h 2008-04-09 07:47:09.000000000 +0100 @@ -17,9 +17,6 @@ /** Diff command main function. */ work_t df__work; -/** Diff per-sts function. */ -action_t df__action; - #endif diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/direnum.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/direnum.c --- fsvs-1.1.14/src/direnum.c 2008-02-22 06:06:15.000000000 +0000 +++ fsvs-1.1.17/src/direnum.c 2008-10-02 18:41:42.000000000 +0100 @@ -261,7 +261,7 @@ * \return +2, +1, 0, -1, -2, suitable for \a qsort(). */ inline int dir___f_sort_by_nameCC(const void *a, const void *b) { - return strcmp(a,b); + return strcoll(a,b); } @@ -295,7 +295,7 @@ int count, status; - BUG_ON(!S_ISDIR(sts->st.mode)); +// BUG_ON(!S_ISDIR(sts->updated_mode)); count=sts->entry_count+1; /* After copying we can release some space, as 64bit inodes * are smaller than 32bit pointers. @@ -322,7 +322,7 @@ * */ int dir__sortbyinode(struct estat *sts) { - BUG_ON(!S_ISDIR(sts->st.mode)); +// BUG_ON(!S_ISDIR(sts->updated_mode)); if (sts->entry_count) { BUG_ON(!sts->by_inode); @@ -558,7 +558,7 @@ /* The names-array has only the offsets stored. * So put correct values there. */ - sts->name=names[i] + this->strings; + sts->name=this->strings + names[i]; sts->st.ino=inode_numbers[i]; /* now the data is copied, we store the pointer. */ @@ -588,11 +588,14 @@ if (status == ENOENT) { STOPIF( wa__warn(WRN__LOCAL_VANISHED, status, - "Eintrag %s nicht mehr gefunden!", sts->name), NULL); - sts->entry_type=FT_IGNORE; + "Entry \"%s\" not found anymore!", sts->name), NULL); + sts->to_be_ignored=1; } else STOPIF( status, "lstat(%s)", sts->name); + + /* New entries get that set, because they're "updated". 
*/ + sts->updated_mode=sts->st.mode; } diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/doc.g-c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/doc.g-c --- fsvs-1.1.14/src/doc.g-c 2008-04-02 06:24:33.000000000 +0100 +++ fsvs-1.1.17/src/doc.g-c 2008-10-29 08:19:27.000000000 +0000 @@ -1,5 +1,5 @@ /* This file is generated, do not edit! - * Last done on Tue Apr 1 13:06:50 2008 + * Last done on Wed Oct 29 08:15:27 2008 * */ @@ -26,6 +26,32 @@ " information this way!\n" "\n"; +const char hlp_delay[]=" This command delays execution until the time has passed at least to the\n" + " next second after writing the dir and urls files. So, where previously\n" + " the delay option was used, this can be substituted by the given command\n" + " followed by the delay command.\n" + "\n" + " The advantage is over the Waiting for a time change after working copy\n" + " operations option is, that read-only commands can be used in the\n" + " meantime.\n" + "\n" + " An example:\n" + " fsvs commit /etc/X11 -m \"Backup of X11\"\n" + " ... read-only commands, like \"status\"\n" + " fsvs delay /etc/X11\n" + " ... read-write commands, like \"commit\"\n" + "\n" + " In the testing framework it is used to save a bit of time; in normal\n" + " operation, where fsvs commands are not so tightly packed, it is\n" + " normally preferable to use the delay option.\n" + "\n"; + +const char hlp_cat[]=" fsvs cat [-r rev] path\n" + "\n" + " Fetches a file with the specified revision or, if not given, BASE, from\n" + " the repository, and outputs it to STDOUT.\n" + "\n"; + const char hlp_checko[]=" fsvs checkout [path] URL [URLs...]\n" "\n" " Sets one or more URLs for the current working directory (or the\n" @@ -72,10 +98,6 @@ " If you're currently in /etc , you can even drop the /etc/ in front, and\n" " just use the filenames.\n" "\n" - " This extended path handling on the commandline is not yet available for\n" - " every command. Most of them still expect you to be in the working copy\n" - " root.\n" - "\n" " Please see status for explanations on -v and -C . For advanced backup\n" " usage see also FSVS_PROP_COMMIT_PIPE.\n" "\n"; @@ -84,9 +106,9 @@ " fsvs cp dump\n" " fsvs cp load\n" "\n" - " This command marks DEST as a copy of SRC at revision rev, so that on\n" - " the next commit of DEST the corresponding source path is sent as copy\n" - " source.\n" + " The copy command marks DEST as a copy of SRC at revision rev, so that\n" + " on the next commit of DEST the corresponding source path is sent as\n" + " copy source.\n" "\n" " The default value for rev is BASE, ie. the revision the SRC (locally)\n" " is at.\n" @@ -222,6 +244,26 @@ " only the indicator ... 
is shown at the end.\n" "\n"; +const char hlp_uncp[]=" fsvs uncopy DEST [DEST ...]\n" + "\n" + " The uncopy command removes a copyfrom mark from the destination entry.\n" + " This will make the entry unknown again, and reported as New on the next\n" + " invocations.\n" + "\n" + " Only the base of a copy can be un-copied; if a directory structure was\n" + " copied, and the given entry is just implicitly copied, this command\n" + " will give you an error.\n" + "\n" + " This is not folded in revert, because it's not clear whether revert\n" + " should restore the original copyfrom data or remove the copy attribute;\n" + " by using a special command this is no longer ambiguous.\n" + "\n" + " Example:\n" + " $ fsvs copy SourceFile DestFile\n" + " # Whoops, was wrong!\n" + " $ fsvs uncopy DestFile\n" + "\n"; + const char hlp_diff[]=" fsvs diff [-v] [-r rev[:rev2]] [-R] PATH [PATH...]\n" "\n" " This command gives you diffs between local and repository files.\n" @@ -247,6 +289,12 @@ "\n" " Please see also Options relating to the \"diff\" action and Using\n" " colordiff.\n" + "\n" + " Todo:\n" + " Two revisions diff is buggy in that it (currently) always\n" + " fetches the full tree from the repository; this is not only a\n" + " performance degradation, but you'll see more changed entries\n" + " than you want. This will be fixed.\n" "\n"; const char hlp_export[]=" fsvs export REPOS_URL [-r rev]\n" @@ -267,8 +315,8 @@ " similar function is available by using -h or -? after a command.\n" "\n"; -const char hlp_ignore[]=" fsvs ignore [prepend|append|at=n] pattern[s]\n" - " fsvs ignore dump|load\n" +const char hlp_ignore[]=" fsvs ignore dump|load\n" + " fsvs ignore [prepend|append|at=n] pattern [pattern ...]\n" "\n" " This command adds patterns to the end of the ignore list, or, with\n" " prepend , puts them at the beginning of the list. With at=x the\n" @@ -319,6 +367,35 @@ " the shell!\n" "\n"; +const char hlp_rign[]=" fsvs rel-ignore [prepend|append|at=n] path-spec [path-spec ...]\n" + " fsvs ri [prepend|append|at=n] path-spec [path-spec ...]\n" + "\n" + " If you use more than a single working copy for the same data, it will\n" + " be stored in different paths - and that makes absolute ignore patterns\n" + " infeasible. 
But relative ignore patterns are anchored at the beginning\n" + " of the WC root - which is a bit tiring if you're deep in your WC\n" + " hierarchy and want to ignore some files.\n" + "\n" + " To make that easier you can use the rel-ignore (abbreviated as ri)\n" + " command; this converts all given path-specifications (that may include\n" + " wildcards as per the shell pattern specification above) to WC-relative\n" + " values before storing them.\n" + "\n" + " Example for /etc as working copy root:\n" + " fsvs rel-ignore '/etc/X11/xorg.conf.*'\n" + "\n" + " cd /etc/X11\n" + " fsvs rel-ignore 'xorg.conf.*'\n" + "\n" + " Both commands would store the pattern \"./X11/xorg.conf.*\".\n" + "\n" + " Note:\n" + " This works only for shell patterns.\n" + "\n" + " For more details about ignoring files please see the ignore command and\n" + " Using ignore patterns.\n" + "\n"; + const char hlp_info[]=" fsvs info [-R [-R]] [PATH...]\n" "\n" " Use this command to show information regarding one or more entries in\n" @@ -356,13 +433,12 @@ "\n" " The optional rev1 and rev2 can be used to restrict the revisions that\n" " are shown; if no values are given, the logs are given starting from\n" - " HEAD downwards.\n" + " HEAD downwards, and then a limit on the number of revisions is applied\n" + " (but see the limit option).\n" "\n" " If you use the -v -option, you get the files changed in each revision\n" " printed, too.\n" "\n" - " Currently at most 100 log messages are shown.\n" - "\n" " There is an option controlling the output format; see \"fsvs log\" output\n" " format.\n" "\n" @@ -371,7 +447,6 @@ " * Show revision for all URLs associated with a working copy? In which\n" " order?\n" " * A URL-parameter, to specify the log URL. (Name)\n" - " * Limit number of revisions shown?\n" "\n"; const char hlp_prop_g[]=" fsvs prop-get PROPERTY-NAME PATH...\n" @@ -400,7 +475,7 @@ "\n" " This command removes property value for the given path(s).\n" "\n" - " See also prop-set\n" + " See also prop-set.\n" "\n"; const char hlp_prop_l[]=" fsvs prop-list [-v] PATH...\n" @@ -441,8 +516,8 @@ " the copy source data.\n" " * An unmodified direct copy destination entry, and other uncommitted\n" " entries with special flags (manually added, or defined as copied),\n" - " are changed back to \"N\"ew -- the copy definition and the\n" - " special status is removed.\n" + " are changed back to \"N\"ew -- the copy definition and the special\n" + " status is removed.\n" " Please note that on implicitly copied entries (entries that are\n" " marked as copied because some parent directory is the base of a\n" " copy) cannot be un-copied; they can only be reverted to their\n" @@ -496,8 +571,8 @@ " * If the entry has been modified, the change is shown as 'C'.\n" " If the modification or status change timestamps (mtime, ctime) are\n" " changed, but the size is still the same, the entry is marked as\n" - " possibly changed (a question mark '?' is printed). See\n" - " opt_checksum.\n" + " possibly changed (a question mark '?' 
is printed) - but see change\n" + " detection for details.\n" " * The meta-data flag 'm' shows meta-data changes like properties,\n" " modification timestamp and/or the rights (owner, group, mode);\n" " depending on the -v/-q command line parameters, it may be splitted\n" @@ -529,7 +604,7 @@ "\n" " This is normally not needed; the use cases are\n" " * debugging and\n" - " * recovering from data loss in $FSVS_WAA (/var/spool/fsvs ).\n" + " * recovering from data loss in $FSVS_WAA.\n" "\n" " It is (currently) important if you want to backup two similar machines.\n" " Then you can commit one machine into a subdirectory of your repository,\n" @@ -560,7 +635,7 @@ " needed.\n" "\n"; -const char hlp_update[]=" ## invalid ## fsvs update [-r rev] [working copy base]\n" +const char hlp_update[]=" fsvs update [-r rev] [working copy base]\n" " fsvs update [-u url@rev ...] [working copy base]\n" "\n" " This command does an update on all specified URLs for the current\n" diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/dox/dev.dox /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/dox/dev.dox --- fsvs-1.1.14/src/dox/dev.dox 2008-02-15 05:43:59.000000000 +0000 +++ fsvs-1.1.17/src/dox/dev.dox 2008-04-04 06:14:34.000000000 +0100 @@ -179,7 +179,7 @@ or it must be returned to the caller. Most of this is already defined in macros. -Typical function layout is like this (taken from waa.c): +Typical function layout is like this (taken from waa.c): \code int waa__make_info_link(char *directory, char *name, char *dest) { @@ -237,6 +237,12 @@ depending on the debug- and verbosity-flags given on the command line, a back trace too. +Another special case is output to \c STDOUT; if we get an error \c EPIPE +here, we pass it up to main() as \c -EPIPE (to avoid confusion with writing +some other data), where it gets ignored. To avoid printing an error message +this is hardcoded in the \c STOPIF() macros. + + Assertions should be checked by \c BUG_ON(condition, format_string, ...). This will cause a segmentation violation, which (for debug builds) will automatically attach a debugger (\c gdb, only if present on the system). diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/dox/HOWTO-BACKUP.dox /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/dox/HOWTO-BACKUP.dox --- fsvs-1.1.14/src/dox/HOWTO-BACKUP.dox 2008-01-31 07:26:26.000000000 +0000 +++ fsvs-1.1.17/src/dox/HOWTO-BACKUP.dox 2008-10-25 12:14:45.000000000 +0100 @@ -23,16 +23,18 @@ to have stored in your backup, and what should be left out. Depending on your system usage and environment you first have to decide: - - Do you only want to backup your data in \c /home? - - Less storage requirements - - In case of hardware crash the OS must be set up again - - Do you want to keep track of your configuration in \c etc? - - Very small storage overhead - - Not much use for backup/restore, but shows what has been changed - - Or do you want to backup your whole installation, from \c / on? - - Whole system versioned, restore is only a few commands - - Much more storage space needed - typically you'd need at least a few - GB free space. +
+<ul>
+<li>Do you only want to backup your data in \c /home?
+  <ul>
+  <li>Less storage requirements
+  <li>In case of hardware crash the OS must be set up again
+  </ul>
+<li>Do you want to keep track of your configuration in \c etc?
+  <ul>
+  <li>Very small storage overhead
+  <li>Not much use for backup/restore, but shows what has been changed
+  </ul>
+<li>Or do you want to backup your whole installation, from \c / on?
+  <ul>
+  <li>Whole system versioned, restore is only a few commands
+  <li>Much more storage space needed - typically you'd need at least a few
+    GB free space.
+  </ul>
+</ul>
The next few moments should be spent thinking about the storage space for the repository - will it be on the system harddisk, a secondary or an @@ -55,12 +57,13 @@ A fair bit of time should go to a small investigation which file patterns - and paths you \b not want to back-up. - - Backup files like \c *.bak, \c *~, \c *.tmp, and similar - - History files: .sh-history and similar in the home-directories - - Cache directories: your favourite browser might store many MB of cached + and paths you \b not want to back-up.
+<ul>
+<li>Backup files like \c *.bak, \c *~, \c *.tmp, and similar
+<li>History files: .sh-history and similar in the home-directories
+<li>Cache directories: your favourite browser might store many MB of cached
+  data in you home-directories
+<li>Virtual system directories, like \c /proc and \c /sys, \c /dev/shmfs.
@@ -78,14 +81,21 @@ Now you have to say what should be ignored - that'll differ depending on your needs/wishes. \code - fsvs ignore ./§**~ ./§**.tmp ./§**.bak + fsvs ignore './§**~' './§**.tmp' './§**.bak' fsvs ignore ./proc/ ./sys/ ./tmp/ fsvs ignore ./var/tmp/ ./var/spool/lpd/ - fsvs ignore ./var/log/*.gz - fsvs ignore ./etc/*.dpkg-dist - fsvs ignore ./etc/*.dpkg-old + fsvs ignore './var/log/§*.gz' + fsvs ignore ./var/run/ /dev/pts/ + fsvs ignore './etc/*.dpkg-dist' './etc/*.dpkg-new' + fsvs ignore './etc/*.dpkg-old' './etc/*.dpkg-bak' \endcode + \note \c /var/run is for transient files; I've heard reports that \ref revert + "reverting" files there can cause problems with running programs.\n + Similar for \c /dev/pts - if that's a \c devpts filesystem, you'll run into + problems on \ref update or \ref revert - as FSVS won't be allowed to create + entries in this directory. + Now you may find that you'd like to have some files encrypted in your backup - like \c /etc/shadow, or your \c .ssh/id_* files. So you tell fsvs to en/decrypt these files: @@ -111,44 +121,44 @@ \section howto_backup_usage Further use and maintenance The further usage is more or less the \c commit command from the last - section. - When do you have to do some manual work? - - When ignore patterns change. - - New filesystems that should be ignored, or would be ignored but + section. \n + When do you have to do some manual work?
+<ul>
+<li>When ignore patterns change.
+  <ul>
+  <li>New filesystems that should be ignored, or would be ignored but
+    shouldn't
-   - You find that your favourite word-processor leaves many *.segv files
-     behind, and similar things
-   - If you get an error message from fsvs, check the arguments and retry.
+  <li>You find that your favorite word-processor leaves many *.segv files
+    behind, and similar things
+  </ul>
+<li>If you get an error message from fsvs, check the arguments and retry.
     In desperate cases (or just because it's quicker than debugging yourself)
     ask on dev [at] fsvs.tigris.org.
+</ul>
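A possible sequence for such an ignore-list adjustment, using the commands
shown earlier (the \c *.segv pattern is only an illustration):
\code
  # review the currently stored patterns
  fsvs ignore dump
  # append a pattern for the newly found junk files
  fsvs ignore './§**.segv'
\endcode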
\section howto_backup_restore Restoration in a working system Depending on the circumstances you can take different ways to restore - data from your repository. - - fsvs \ref export allows you to just dump some repository data + data from your repository.
+<ul>
+<li>"fsvs export" allows you to just dump some repository data
   into your filesystem - eg. into a temporary directory to sort things out.
-  - Using fsvs \ref revert you can go to older revisions of a
-    given file inplace. \note Whole directory structures are planned, just
-    not done yet.
-  - Or you can do a fresh checkout - set an URL in an (empty) directory,
+<li>Using "fsvs revert" you can get older revisions of a
+  given file, directory or directory tree inplace. \n
+<li>Or you can do a fresh checkout - set an URL in an (empty) directory,
   and update to the needed revision.
-  - If everything else fails (no backup media with fsvs on it), you can use
+<li>If everything else fails (no backup media with fsvs on it), you can use
   subversion commands (eg. \c export) to restore needed parts, and update
   the rest with fsvs.
-
+</ul>
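As a rough sketch of the export and checkout possibilities just listed
(repository URL, revision number and paths are invented):
\code
  # dump a clean copy into a scratch directory, without any FSVS bookkeeping
  mkdir /tmp/restore && cd /tmp/restore
  fsvs export file:///var/repos/backup -r 1200

  # or: assign the URL to an empty directory, then update to the wanted revision
  mkdir /tmp/wc && cd /tmp/wc
  fsvs checkout file:///var/repos/backup
  fsvs update -r 1200
\endcode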
\section howto_backup_recovery Recovery for a non-booting system In case of a real emergency, when your harddisks crashed or your filesystem was eaten and you have to re-partition or re-format, you - should get your system working again by - - booting from a knoppix or some other Live-CD (with \c fsvs on it), - - partition/format as needed, - - mount your harddisk partitions below eg. \c /mnt, - - and then recovering by + should get your system working again by
+<ul>
+<li>booting from a knoppix or some other Live-CD (with \c fsvs on it),
+<li>partition/format as needed,
+<li>mount your harddisk partitions below eg. \c /mnt,
+<li>and then recovering by
\code $ cd /mnt $ export FSVS_CONF=/etc/fsvs # if non-standard diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/dox/options.dox /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/dox/options.dox --- fsvs-1.1.14/src/dox/options.dox 2008-03-19 06:41:44.000000000 +0000 +++ fsvs-1.1.17/src/dox/options.dox 2008-10-29 08:19:15.000000000 +0000 @@ -7,12 +7,18 @@ FSVS understands some other options, that modify its behaviour. + \section o__list Short list of options FSVS currently knows:
+
+<li>\c all_removed - \ref o_all_removed
+<li>\c author - \ref o_author
+<li>\c change_check - \ref o_chcheck
 <li>\c colordiff - \ref o_colordiff
 <li>\c commit_to - \ref o_commit_to
 <li>\c conflict - \ref o_conflict
+<li>\c conf - \ref o_conf.
+<li>\c config_dir - \ref o_configdir.
 <li>\c copyfrom_exp - \ref o_copyfrom_exp
 <li>\c debug_output - \ref o_debug_output
 <li>\c delay - \ref o_delay
@@ -20,15 +26,19 @@
 <li>\c dir_sort - \ref o_dir_sort
 <li>\c empty_commit - \ref o_empty_commit
 <li>\c filter - \ref o_filter, but see \ref glob_opt_filter "-f".
+<li>\c limit - \ref o_logmax
 <li>\c log_output - \ref o_logoutput
 <li>\c merge_prg, \c merge_opt - \ref o_merge
 <li>\c path - \ref o_opt_path
 <li>\c softroot - \ref o_softroot
 <li>\c stat_color - \ref o_status_color
+<li>\c stop_on_change - \ref o_stop_change
 <li>\c warning - \ref o_warnings, but see \ref glob_opt_warnings "-W".
+<li>\c waa - \ref o_waa.
+ \section o__prio Priorities for option setting The priorities are
    @@ -51,6 +61,7 @@ overridden if necessary. + \section o__examples Examples Using the commandline: @@ -69,6 +80,7 @@ \endcode + \section o_opt_path Displaying paths You can specify how paths printed by FSVS should look like; this is used @@ -78,11 +90,12 @@ There are several possible settings, of which one can be chosen via the \c path option. -- \anchor pd_wcroot \c wcroot \n +
+<ul>
+<li>\anchor pd_wcroot \c wcroot \n
   This is the old, traditional FSVS setting, where all paths are printed
   relative to the working copy root.
-- \anchor pd_parm \c parameter \n
+<li>\anchor pd_parm \c parameter \n
   With this setting FSVS works like most other programs - it uses the first
   best-matching parameter given by the user, and appends the rest of the
   path.\n
@@ -91,15 +104,17 @@
   single run through the entries.
   So if some entry matches more than one parameter, it is printed using
   the first. \n
-- \anchor pd_absolute \c absolute \n
+<li>\anchor pd_absolute \c absolute \n
   All paths are printed in absolute form. This is useful if you want to
   paste them into other consoles without worrying whether the current
   directory matches.
+</ul>
 
 The next two are nearly identical to \c absolute, but the beginning of
 paths are substituted by environment variables. This makes sense if you
 want the advantage of full paths, but have some of them abbreviated.
-- \anchor pd_env \c environment \n
+<ul>
+<li>\anchor pd_env \c environment \n
   Match variables to directories after reading the known entries, and use
   this cached information. This is faster, but might miss the best case if
   new entries are found (which would not be checked against possible longer
@@ -107,7 +122,7 @@
   Furthermore, as this works via associating environment variables to
   entries, the environment variables must at least match the working copy
   base - shorter paths won't be substituted.
-- \c full-environment \n
+<li>\c full-environment \n
   Check for matches just before printing the path. \n
   This is slower, but find the best fit.
 \note The string of the environment variables must match a directory name;
@@ -118,6 +133,7 @@
 and similar which might differ between sessions. Maybe the allowed
 prefixes for the environment variables should be settable in the
 configuration. Opinions to the users mailing list, please.
+</ul>
    Example, with \c / as working copy base: \code @@ -176,10 +192,11 @@ The diff is not done internally in FSVS, but some other program is called, to get the highest flexibility. -There are several option values: -- diff_prg: The executable name, default "diff". -- diff_opt: The default options, default "-pu". -- diff_extra: Extra options, no default. +There are several option values:
+<ul>
+<li>diff_prg: The executable name, default "diff".
+<li>diff_opt: The default options, default "-pu".
+<li>diff_extra: Extra options, no default.
+</ul>
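As a hedged illustration of overriding these values (the wrapper path is
invented), either on the command line or in a configuration file:
\code
  # one-off, for a single run
  fsvs -o diff_extra=-b diff /etc/ssh/sshd_config

  # or permanently, eg. in /etc/fsvs/config
  diff_prg=/usr/local/bin/diff-wrapper
\endcode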
    The call is done as \code @@ -192,21 +209,26 @@ If you need more flexibility, write a shell script and pass its name as \c diff_prg. +Very advanced users might be interested in \ref exp_env "exported environment +variables", too. + \section o_colordiff Using colordiff If you have \c colordiff installed on your system, you might be interested in the \c colordiff option. -It can take on of these values: -- \c no, \c off or \c false: Don't use \c colordiff. -- \c yes, \c true or \c on: If this option is set on the commandline, or - the output is a tty, pipe the output of the \c diff program (see \ref - o_diff) to \c colordiff. -- \c auto: Like yes, but don't throw an error if colordiff can't be - started; just pipe the data as-is to \c STDOUT. +It can take on of these values:
+<ul>
+<li>\c no, \c off or \c false: Don't use \c colordiff.
+<li>empty (default value): Try to use \c colordiff as executable, but don't
+  throw an error if it can't be started; just pipe the data as-is to \c
+  STDOUT.
+<li>anything else: Pipe the output of the \c diff program (see \ref
+  o_diff) to the given executable.
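For example (the alternative filter program is only illustrative):
\code
  # force plain output, even on a tty
  fsvs -o colordiff=no diff /etc/fstab

  # pipe the diff through some other colorizer instead of colordiff
  fsvs -o colordiff=/usr/local/bin/my-colorizer diff /etc/fstab
\endcode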
    + +Please note that if \c STDOUT is not a tty (eg. is redirected into a file), +this option must be given on the command line to take effect. -The default value is \c auto. \section o_filter Filtering entries @@ -219,6 +241,58 @@ \endcode +\section o_all_removed Trimming the list of deleted entries + +If you remove a directory, all entries below are implicitly known to be +deleted, too. To make the \ref status output shorter there's the \c +all_removed option; which, if set to \c yes, will cause children of removed +entries to be omitted. + +Example for the config file: +\code + all_removed=yes +\endcode + + +\section o_chcheck Change detection + +This options allows to specify the trade-off between speed and accuracy. + +A file with a changed size can immediately be known as changed; but if only +the modification time is changed, this is not so easy. Per default FSVS +does a MD5 check on the file in this case; if you don't want that, or if +you want to do the checksum calculation for \b every file (in case a file +has changed, but its mtime not), you can use this option to change FSVS' +behaviour. + +On the command line there's a shortcut for that: for every \c "-C" another +check in this option is chosen. + + +The recognized specifications are + +
+   none        Resets the check bitmask to "no checks".
+   file_mtime  Check files for modifications via MD5 if the
+               mtime is different - default
+   dir         Check all directories for new entries - this happens
+               normally if a directory has a changed mtime
+   allfiles    Check \b all files with MD5 for changes (\c tripwire
+               -like operation).
+   full        All available checks.
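A sketch of the \c -C shortcut mentioned above; presumably every repetition
enables one more of these checks:
\code
  fsvs status -C
  fsvs status -C -C
\endcode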
    + + +You can give multiple options; they're accumulated unless overridden by \c +none. +\code + fsvs -o change_check=allfiles status +\endcode + + +\note \a commit and \a update set additionally the \c dir option, to avoid +missing new files. + + \section o_warnings Setting warning behaviour Please see the command line parameter \ref glob_opt_warnings "-W", which is @@ -229,10 +303,11 @@ \endcode + \section o_softroot Using an alternate root directory This is a path that is prepended to \c $FSVS_WAA and \c $FSVS_CONF -(or their default values, see \ref envs), if they do not already +(or their default values, see \ref waa_files), if they do not already start with it, and it is cut off for the directory-name MD5 calculation. When is that needed? Imagine that you've booted from some Live-CD like @@ -246,16 +321,19 @@ This is used for recovery; see the example in \ref howto_backup_recovery. -So how does this work? -- The internal data paths derived from \c $FSVS_WAA and \c $FSVS_CONF use - the value given for \c softroot as a base directory, if they do not - already start with it. - (If that creates a conflict for you, eg. in that you want to use \c /var - as the \c softroot, and your \c $FSVS_WAA should be \c /var/fsvs, you can - make the string comparison fail by using /./var for either path.) -- When a directory name for \c $FSVS_CONF or \c $FSVS_WAA is derived from - some file path, the part matching \c softroot is cut off, so that the - generated names match the situation after rebooting. +So how does this work?
+<ul>
+<li>The internal data paths derived from \c $FSVS_WAA and \c $FSVS_CONF use
+the value given for \c softroot as a base directory, if they do not already
+start with it. \n
+(If that creates a conflict for you, eg. in that you want to use \c /var as
+the \c softroot, and your \c $FSVS_WAA should be \c /var/fsvs, you can make
+the string comparison fail by using /./var for either path.)
+<li>When a directory name for \c $FSVS_CONF or \c $FSVS_WAA is derived from
+some file path, the part matching \c softroot is cut off, so that the
+generated names match the situation after rebooting.
+</ul>
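A hedged example for the recovery situation described above, with the
damaged installation mounted below \c /mnt (whether additional settings
like \c $FSVS_WAA are needed depends on the local setup):
\code
  fsvs -o softroot=/mnt status
\endcode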
    Previously you'd have to \ref export your data back to the filesystem and call \ref urls "fsvs urls" and \c fsvs \ref sync-repos @@ -268,18 +346,40 @@ \c chroot() into the given directory (or boot with it as \c /), you'll want this set. +\note As this value is used for finding the correct working copy root (by +trying to find a \ref o_conf "conf-path", it cannot be set from a per-wc +config file. Only the environment, global configuration or command line +parameter make sense. + + + +\section o_logmax "fsvs log" revision limit + +There are some defaults for the number of revisions that are shown on a +"fsvs log" command:
+<ul>
+<li>2 revisions given (-rX:Y): \c abs(X-Y)+1, ie. all revisions in
+that range.
+<li>1 revision given: exactly that one.
+<li>no revisions given: from \c HEAD to 1, with a maximum of 100.
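Assuming the \c limit option is given like any other option, a sketch:
\code
  # show (at most) 20 revisions instead of the default 100
  fsvs -o limit=20 log
\endcode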
    + +So this command is mostly useful to get more than the default number of +revisions on when running without revision arguments, or to get fewer. + + \section o_logoutput "fsvs log" output format You can modify aspects of the \ref log "fsvs log" output format by setting -the \c log_output option to a combination of these flags: -- \c color: This uses color in the output, similar to \c cg-log - (cogito-log); the header and separator lines are highlighted. +the \c log_output option to a combination of these flags:
+<ul>
+<li>\c color: This uses color in the output, similar to \c cg-log
+(cogito-log); the header and separator lines are highlighted.
 \note This uses ANSI escape sequences, and tries to restore the default
 color; if you know how to do that better (and more compatible), please tell
 the developer mailing list.
+<li>\c indent: Additionally you can shift the log message itself a space to
+the right, to make the borders clearer.
+</ul>
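A short, hedged example; the comma-separated combination mirrors the other
list-valued options and is only assumed here:
\code
  fsvs -o log_output=indent log
  fsvs -o log_output=color,indent log
\endcode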
    Furthermore the value \c normal is available; this turns off all special handling. @@ -290,6 +390,7 @@ difference to the \ref o_filter option, which is cumulating. + \section o_status_color Status output coloring \c FSVS can colorize the output of the status lines; removed entries will @@ -303,6 +404,7 @@ though. + \section o_dir_sort Directory sorting If you'd like to have the output of \ref status sorted, you can use the @@ -310,9 +412,35 @@ \c FSVS will do a run through the tree, to read the status of the entries, and then go through it again, but sorted by name. (See dir_enumerator().) -\note If \c fsvs aborts with an error during \ref status output, you might -want to turn this option off again (eg. on the commandline with \c --odir_sort=no) to see where \c fsvs stops. +\note If \c FSVS aborts with an error during \ref status output, you might +want to turn this option off again, to see where \c fsvs stops; the easiest +way is on the commandline with \c -odir_sort=no. + + + +\section o_author Author + +You can specify an author to be used on commit. +This option has a special behaviour; if the first character of +the value is an \c '$', the value is replaced by the environment +variable named. + +Empty strings are ignored; that allows an \c /etc/fsvs/config like this: +\code + author=unknown + author=$LOGNAME + author=$SUDO_USER +\endcode +where the last non-empty value is taken; +and if your \c .authorized_keys has lines like +\code + environment="FSVS_AUTHOR=some_user" ssh-rsa ... +\endcode +that would override the config values. + +\note Your \c sshd_config needs the \c PermitUserEnvironment setting; you can +also take a look at the \c AcceptEnv and \c SendEnv documentation. + \section o_commit_to Destination URL for commit @@ -327,6 +455,8 @@ fsvs ci /etc/passwd -m "New user defined" -ocommit_to=local \endcode + + \section o_debug_output Destination for debug output See \ref glob_opt_deb "-d". @@ -337,6 +467,7 @@ \endcode + \section o_empty_commit Doing empty commits In the default settings FSVS will happily create empty commits, ie. @@ -353,6 +484,7 @@ \endcode + \section o_copyfrom_exp Avoiding expensive compares on \ref cpfd "copyfrom-detect" If you've got big files that are seen as new, doing the MD5 comparison can @@ -364,6 +496,7 @@ \endcode + \section o_delay Waiting for a time change after working copy operations If you're using \c fsvs in automated systems, you might see that changes @@ -384,6 +517,21 @@ \endcode + +\section o_stop_change Stopping status reports as soon as changes are found + +If you want to use \c FSVS in scripts, you might simply want to know whether +anything was changed. + +For this use the \c stop_on_change option, possibly combined with +\ref o_filter : +\code + fsvs -o stop_change=yes st /etc + fsvs -o stop_change=yes -o filter=text status /etc/init.d +\endcode + + + \section o_conflict How to resolve conflicts on update If you start an update, but one of the entries that was changed in the @@ -427,14 +575,16 @@ automatically get ignored). + \section o_merge Options regarding the "merge" programm Like with \ref o_diff "diff", the \c merge operation is not done internally in FSVS. -To have better control -- merge_prg: The executable name, default "merge". -- merge_opt: The default options, default "-A". +To have better control
+
+- merge_prg: The executable name, default "merge".
+- merge_opt: The default options, default "-A".
+
    The option \c "-p" is always used: \code @@ -442,5 +592,38 @@ \endcode + +\section o_conf Path definitions for the config and WAA area + +\anchor o_waa + +The paths given here are used to store the persistent configuration data +needed by FSVS; please see \ref waa_files and \ref o__prio for more +details, and the \ref o_softroot parameter as well as the \ref +howto_backup_recovery for further discussion. + +\code + FSVS_CONF=/home/user/.fsvs-conf fsvs -o waa=/home/user/.fsvs-waa st +\endcode + +\note Please note that these paths can be given \b only as environment +variables (\c $FSVS_CONF resp. \c $FSVS_WAA) or as command line parameter; +settings in config files are ignored. + + +\section o_configdir Configuration directory for the subversion libraries + +This path specifies where the subversion libraries should take their +configuration data from; the most important aspect of that is authentication +data, especially for certificate authentication. + +The default value is \c $FSVS_CONF/auth/. + +\c /etc/fsvs/config could have eg. +\code + config_dir=/root/.subversion +\endcode + + */ // vi: filetype=doxygen spell spelllang=en_gb formatoptions+=ta : diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/dox/statii.dox /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/dox/statii.dox --- fsvs-1.1.14/src/dox/statii.dox 2008-03-19 06:41:44.000000000 +0000 +++ fsvs-1.1.17/src/dox/statii.dox 2008-08-02 18:04:48.000000000 +0100 @@ -93,10 +93,11 @@ New -> CopiedU [label="copy", URL="\ref cp"]; + CopiedU -> New [label="uncopy", URL="\ref uncp"]; + { edge [ color=blue, URL="\ref revert", tooltip="revert"]; - CopiedU -> New; CopiedC -> CopiedU; Changed -> Committed; Deleted -> Committed; diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/dox/TIPS_TRICKS.dox /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/dox/TIPS_TRICKS.dox --- fsvs-1.1.14/src/dox/TIPS_TRICKS.dox 1970-01-01 01:00:00.000000000 +0100 +++ fsvs-1.1.17/src/dox/TIPS_TRICKS.dox 2008-08-01 06:18:28.000000000 +0100 @@ -0,0 +1,45 @@ +/** + \defgroup tips Tips and tricks + \ingroup userdoc + + This is a list of tips and tricks that you might find useful. + + + \section tip_verbose Seeing the verbose status, but only changed entries + + Sometimes the status \ref status_meta_changed "meta-data changed" + is not enough - the differentiation between \c mtime and the permission + attributes is needed. + + For that the command line option \ref glob_opt_verb "-v" is used; but + this \e verbose mode also prints all entries, not only the changed. + + To solve that the \ref glob_opt_filter "filter option" gets set; with the + value \c none (to reset the mask), and then with the wanted mask - to + restore the default the string \c "text,meta" could be set. + + Example: + \code + $ fsvs status -v -f none,text,meta + $ fsvs status -v -f none,text,meta /etc + $ fsvs status -v -f none,text,meta some/dir another_dir and_a_file + \endcode + + + \section tip_perf Performance points + + Some effort has been taken to get \c fsvs as fast as possible. + + With 1.1.17 the default for checking for changes on files was altered, + to do a MD5-check of files with a changed modification time but the same + size (to avoid printing a \c "?" \ref status_possibly "as status"); + if that affects your use-case badly you can use the \ref o_chcheck + "option" to get the old (fast) behavior. + + Please note that not the whole file has to be read - the first changed + manber block (with averaged 128kB) terminates the check. 
+ +*/ + +// vi: filetype=doxygen spell spelllang=en_us + diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/doxygen-data/Doxyfile /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/doxygen-data/Doxyfile --- fsvs-1.1.14/src/doxygen-data/Doxyfile 2008-01-06 15:27:32.000000000 +0000 +++ fsvs-1.1.17/src/doxygen-data/Doxyfile 2008-10-10 18:34:00.000000000 +0100 @@ -38,7 +38,7 @@ # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. -OUTPUT_DIRECTORY = ../../www/doxygen/ +OUTPUT_DIRECTORY = ../../doxygen/ # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output @@ -1211,7 +1211,7 @@ # generated by dot. Possible values are png, jpg, or gif # If left blank png will be used. -DOT_IMAGE_FORMAT = png +DOT_IMAGE_FORMAT = gif # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/doxygen-data/Doxyfile-man /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/doxygen-data/Doxyfile-man --- fsvs-1.1.14/src/doxygen-data/Doxyfile-man 2008-01-31 07:26:26.000000000 +0000 +++ fsvs-1.1.17/src/doxygen-data/Doxyfile-man 2008-10-10 18:34:00.000000000 +0100 @@ -38,7 +38,7 @@ # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. -OUTPUT_DIRECTORY = ../../www/doxygen/ +OUTPUT_DIRECTORY = ../../doxygen/ # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/est_ops.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/est_ops.c --- fsvs-1.1.14/src/est_ops.c 2008-04-02 06:25:13.000000000 +0100 +++ fsvs-1.1.17/src/est_ops.c 2008-10-25 12:16:24.000000000 +0100 @@ -37,10 +37,10 @@ * */ struct free_estat { - /** Number of "struct estat"s that can be stored here. */ - int count; /** Next free block(s) */ struct free_estat *next; + /** Number of "struct estat"s that can be stored here. */ + int count; }; @@ -67,33 +67,20 @@ 9+1+9+1+NAME_MAX+1+1) -/** Startstring for links in the repository. +/** -. * * It's a bit unaesthetical that devices use a " " for the repository data, * but a ":" in the waa as delimiter. * But "link " is specified in subversion, and having the repository data * different would not be better. * So we just allow both at parsing, and use the "right" for each target. */ -static const char link_spec[]="link "; +const char link_spec[]="link ", + cdev_spec[]="cdev", + bdev_spec[]="bdev"; static struct free_estat *free_list = NULL; - -/** -. - * Depending on \c st->mode one of the \c FT_* constants is returned. */ -int ops___filetype(struct sstat_t *st) -{ - /* in order of most probable to least */ - if (S_ISREG(st->mode)) return FT_FILE; - if (S_ISDIR(st->mode)) return FT_DIR; - if (S_ISLNK(st->mode)) return FT_SYMLINK; - if (S_ISCHR(st->mode)) return FT_CDEV; - if (S_ISBLK(st->mode)) return FT_BDEV; - - /* socket, pipe - should we do them, too ? */ - return FT_IGNORE; -} - + /** -. 
* @@ -103,25 +90,39 @@ { int maj, min; int ft, mode; - char type, delimiter; + char delimiter; int status; status=0; - if (0 == strncmp(data, link_spec, 5)) + if (0 == strncmp(data, link_spec, strlen(link_spec))) { - ft=FT_SYMLINK; mode=S_IFLNK; - *info=data+5; + if (info) + *info=data+5; } else { + if (0 == strncmp(data, cdev_spec, strlen(cdev_spec))) + { + data+=strlen(cdev_spec); + mode=S_IFCHR; + } + else if (0 == strncmp(data, bdev_spec, strlen(bdev_spec))) + { + data+=strlen(bdev_spec); + mode=S_IFBLK; + } + else mode=0; + if (info) *info=NULL; - ft=sscanf(data, "%cdev%c0x%X:0x%X", - &type, &delimiter, &maj, &min); - STOPIF_CODE_ERR(ft != 4 || - (delimiter != ':' && delimiter != ' ') || - (type != 'c' && type != 'b'), EINVAL, + + ft=sscanf(data, "%c0x%X:0x%X", &delimiter, &maj, &min); + + STOPIF_CODE_ERR(mode == 0 || + ft != 3 || + (delimiter != ':' && delimiter != ' '), + EINVAL, "'%s' is not parseable as a special description", data); #ifdef DEVICE_NODES_DISABLED @@ -129,12 +130,9 @@ #else sts->st.rdev=MKDEV(maj, min); #endif - mode = type == 'c' ? S_IFCHR : S_IFBLK; - ft = type == 'c' ? FT_CDEV : FT_BDEV; } - sts->st.mode= (sts->st.mode & ~S_IFMT) | mode; - sts->entry_type = ft; + sts->updated_mode=sts->st.mode= (sts->st.mode & ~S_IFMT) | mode; ex: return status; @@ -142,14 +140,20 @@ /** -. - * The subversion header string for special nodes is prepended. */ + * The subversion header string for special nodes is prepended. + * + * The returned pointer in \a *erg must not be free()d. */ int ops__link_to_string(struct estat *sts, char *filename, char **erg) { + static struct cache_t *cache=NULL; char *cp; int l, status, hlen; + STOPIF( cch__new_cache(&cache, 4), NULL); + + status=0; BUG_ON(!S_ISLNK(sts->st.mode)); @@ -158,8 +162,7 @@ hlen=strlen(link_spec); l=sts->st.size + hlen + 1 + 8; - cp=malloc(l); - STOPIF_ENOMEM(!cp); + STOPIF( cch__add(cache, 0, NULL, l, &cp), NULL); strcpy(cp, link_spec); STOPIF_CODE_ERR( readlink(filename, cp+hlen, sts->st.size) == -1, @@ -179,17 +182,14 @@ /* I'm not fully sure about that. */ BUG_ON(!(sts->remote_status & FS_NEW) && - sts->entry_type != FT_BDEV && - sts->entry_type != FT_CDEV, - "%s: type is 0x%x, mode is 0%o", - sts->name, - sts->entry_type, sts->st.mode); + !(S_ISBLK(sts->updated_mode) || S_ISCHR(sts->updated_mode)), + "%s: mode is 0%o", sts->name, sts->st.mode); #ifdef DEVICE_NODES_DISABLED DEVICE_NODES_DISABLED(); #else sprintf(buffer, "%s%c0x%x:0x%x", - S_ISBLK(sts->st.mode) ? "bdev" : "cdev", + S_ISBLK(sts->st.mode) ? bdev_spec : cdev_spec, delimiter, (int)MAJOR(sts->st.rdev), (int)MINOR(sts->st.rdev)); @@ -257,10 +257,8 @@ file_status |= FS_META_UMODE; /* both of same type ? */ - ft_old = ops___filetype(old); - ft_new = ops___filetype(new); - - sts->entry_type = ft_new; + ft_old = old->mode & S_IFMT; + ft_new = new->mode & S_IFMT; if (ft_old != ft_new) { @@ -269,18 +267,19 @@ } /* same type - compare */ + BUG_ON(sts->to_be_ignored); switch (ft_new) { - case FT_CDEV: - case FT_BDEV: - DEBUGP("olu=%llu new=%llu", (t_ull)old->rdev, (t_ull)new->rdev); + case S_IFBLK: + case S_IFCHR: + DEBUGP("old=%llu new=%llu", (t_ull)old->rdev, (t_ull)new->rdev); file_status |= (old->rdev == new->rdev) ? FS_NO_CHANGE : FS_REPLACED; break; - case FT_SYMLINK: - case FT_FILE: + case S_IFLNK: + case S_IFREG: if (old->size != new->size) file_status |= FS_CHANGED; else @@ -291,7 +290,7 @@ file_status |= FS_LIKELY; break; - case FT_DIR: + case S_IFDIR: /* This entry *could* be changed. 
* But as the changed flag is set if a child entry is missing * or if new entries are found, but never cleared, we don't set @@ -301,12 +300,14 @@ file_status |= FS_LIKELY; break; - case FT_IGNORE: + default: + BUG_ON(1); +// case FT_IGNORE: file_status=FS_NO_CHANGE; } ex: - DEBUGP("change: types 0x%x vs 0x%x; 0x%x=%s", + DEBUGP("change: types 0%o vs 0%o; 0x%x=%s", ft_old, ft_new, file_status, st__status_string_fromint(file_status)); @@ -365,7 +366,7 @@ sts->st.dev=dev; sts->st.ino=this_ino; sts->st.size=size; - sts->st.mode=mode; + sts->updated_mode=sts->st.mode=mode; sts->old_rev = sts->repos_rev; /* The %n are not counted on glibc. @@ -408,7 +409,6 @@ "Parsing the md5 failed"); } - sts->entry_type = ops___filetype( &(sts->st) ); /* Skip over exactly one space - else we'd loose information about * filenames starting with whitespaces. */ @@ -713,8 +713,8 @@ dir->entry_count += count; dir->by_inode[dir->entry_count]=NULL; - /* Re-sort the index */ - status=dir__sortbyinode(dir); + /* Re-sort the index next time it's needed. */ + dir->to_be_sorted=1; ex: return status; @@ -722,6 +722,9 @@ /** -. + * + * This function doesn't return \c ENOENT, if no entry is found; \a *sts + * will just be \c NULL. * */ int ops__find_entry_byname(struct estat *dir, const char *name, struct estat **sts, @@ -748,10 +751,10 @@ if (sts_p) DEBUGP("found %s on %p; ignored: 0x%x", name, sts_p, - (*sts_p)->entry_type); + (*sts_p)->to_be_ignored); /* don't return removed entries, if they're not wanted */ - *sts=sts_p && (ignored_too || (*sts_p)->entry_type != FT_IGNORE) ? + *sts=sts_p && (!(*sts_p)->to_be_ignored || ignored_too) ? *sts_p : NULL; if (!*sts) @@ -866,8 +869,18 @@ DEBUGP("no free list, allocating"); /* No more free entries in free list. Allocate. */ returned=needed; + /* Allocate at least a certain block size. */ + if (needed < 8192/sizeof(**where)) + needed=8192/sizeof(**where); *where=calloc(needed, sizeof(**where)); STOPIF_ENOMEM(!*where); + + if (needed > returned) + { + free_list=(struct free_estat*)(*where+returned); + free_list->next=NULL; + free_list->count=needed-returned; + } } DEBUGP("giving %d blocks at %p", returned, *where); @@ -897,7 +910,9 @@ status=0; - if (S_ISDIR(sts->st.mode)) + if (sts->old) + STOPIF( ops__free_entry(& sts->old), NULL); + if (S_ISDIR(sts->updated_mode)) { BUG_ON(sts->entry_count && !sts->by_inode); @@ -907,7 +922,7 @@ IF_FREE(sts->by_inode); IF_FREE(sts->by_name); IF_FREE(sts->strings); - sts->st.mode=0; + sts->updated_mode=0; } /* Clearing the memory here serves no real purpose; @@ -1077,7 +1092,7 @@ /** -. - * An entry is marked by having \c entry_type==FT_IGNORE; and such entries + * An entry is marked by having estat::to_be_ignored set; and such entries * are removed here. * * If \a fast_mode is set, the entries are get removed from the list are @@ -1098,7 +1113,7 @@ new_count=0; for(i=0; ientry_count; i++) { - if ((*src)->entry_type != FT_IGNORE) + if (!(*src)->to_be_ignored) { *dst=*src; dst++; @@ -1229,7 +1244,6 @@ sts->st.mode=S_IFDIR | 0700; sts->st.size=0; sts->entry_count=0; - sts->entry_type=FT_DIR; sts->parent=current; /* Add that directory with the next commit. */ sts->flags=sts_flags | RF_ISNEW; @@ -1252,32 +1266,37 @@ /** -. - * \a fullpath is optional; if not set, the path is generated. * * The parent directory should already be done, so that removal of whole * trees is done without doing unneeded \c lstat()s. * - * Depending on \c opt_checksum a file might be checked for changes by a - * MD5 comparision. 
+ * Depending on \c o_chcheck a file might be checked for changes by a MD5 + * comparision. * * Per default \c only_check_status is not set, and the data from \c * lstat() is written into \a sts. Some functions need the \b old values - * and can set this flag; then only \c entry_status is modified. */ -int ops__update_single_entry(struct estat *sts, char *fullpath) + * and can set this flag; then only \c entry_status is modified. + * + * If \a output is not NULL, then it is overwritten, and \a sts->st is not + * changed - independent of \c only_check_status. In case of a removed + * entry \a *output is not changed. */ +int ops__update_single_entry(struct estat *sts, struct sstat_t *output) { int status; struct sstat_t st; int i; + char *fullpath; + - /* now get the path, and stat() */ - if (!fullpath) - STOPIF( ops__build_path(&fullpath, sts), NULL); + STOPIF( ops__build_path(&fullpath, sts), NULL); /* If we see that the parent has been removed, there's no need * to check this entry - the path will surely be invalid. */ if (sts->parent) if (sts->parent->entry_status & FS_REMOVED) + { goto removed; + } /* Check for current status */ status=hlp__lstat(fullpath, &st); @@ -1293,6 +1312,10 @@ STOPIF(status, "cannot lstat(%s)", fullpath); removed: + /* Re-set the values, if needed */ + if (st.mode) + memset(&st, 0, sizeof(st)); + sts->entry_status=FS_REMOVED; /* Only ENOENT gets here, and that's ok. */ status=0; @@ -1303,81 +1326,121 @@ sts->entry_status=ops__stat_to_action(sts, &st); /* May we print a '?' ? */ - if ( (opt_checksum==1 && (sts->entry_status & FS_LIKELY)) || - (opt_checksum>1) ) + if ( ((opt__get_int(OPT__CHANGECHECK) & CHCHECK_FILE) && + (sts->entry_status & FS_LIKELY)) || + (opt__get_int(OPT__CHANGECHECK) & CHCHECK_ALLFILES) ) { - switch (sts->entry_type) + /* If the type changed (symlink => file etc.) there's no 'likely' - + * the entry *was* changed. + * So if we get here, we can check either type - st or sts->st. */ + if (S_ISREG(st.mode) || S_ISLNK(st.mode)) { - case FT_FILE: - case FT_SYMLINK: - /* make sure, one way or another */ - STOPIF( cs__compare_file(sts, fullpath, &i), NULL); - - sts->entry_status = i ? - (sts->entry_status & ~ FS_LIKELY) | FS_CHANGED : - sts->entry_status & ~(FS_LIKELY | FS_CHANGED); - break; - case FT_DIR: - /* Will be checked later, on last child of this directory. */ - break; - case FT_BDEV: - case FT_CDEV: - break; - default: - BUG_ON(1, "Undefined entry type!"); + /* make sure, one way or another */ + STOPIF( cs__compare_file(sts, fullpath, &i), NULL); + + if (i>0) + sts->entry_status= (sts->entry_status & ~ FS_LIKELY) | FS_CHANGED; + else if (i==0) + sts->entry_status= sts->entry_status & ~(FS_LIKELY | FS_CHANGED); } + /* Directories will be checked later, on finishing their children; + * devices have already been checked, and other types are not + * allowed. */ } + } - - /* Now we've compared we take the new values. - * Better for display, needed for commit (current values) */ - /* Before an update we only set ->entry_status - to keep the old values - * intact. */ + /* Now we've compared we take the new values. + * Better for display, needed for commit (current values) */ + /* Before an update (and some other operations) we only set + * sts->entry_status - to keep the old values intact. 
*/ + if (output) + *output=st; + else if (!only_check_status) sts->st=st; - } - DEBUGP("known %s: action=%X, flags=%X, status=%d", - fullpath, sts->entry_status, sts->flags, status); + DEBUGP("known %s: action=%X, flags=%X, mode=0%o, status=%d", + fullpath, sts->entry_status, sts->flags, sts->updated_mode, status); + sts->updated_mode=st.mode; ex: return status; } +/** Set the estat::do_* bits, depending on the parent. + * May not be called for the root. + * */ +inline void ops___set_todo_bits(struct estat *sts) +{ + /* For recursive operation: If we should do the parent completely, we do + * the sub-entries, too. */ + if (opt_recursive>0) + sts->do_userselected |= sts->parent->do_userselected; + /* For semi-recursive operation: Do the child, if the parent was + * wanted. */ + if (opt_recursive>=0) + sts->do_this_entry |= sts->parent->do_userselected | sts->do_userselected; +} + + /** -. - * Must have a parent! */ -int ops__set_to_handle_bits(struct estat *sts) + * May not be called for the root. */ +int ops__set_todo_bits(struct estat *sts) { int status; status=0; - DEBUGP("before parent: do_tree=%d.%d parent=%d.%d", - sts->do_tree, - sts->do_this_entry, - sts->parent ? sts->parent->do_tree : 0, + /* We don't know any better yet. */ + sts->do_filter_allows=1; + sts->do_filter_allows_done=1; + + ops___set_todo_bits(sts); + + DEBUGP("user,this,child=%d.%d parent=%d.%d", + sts->do_userselected, + sts->do_this_entry, + sts->parent ? sts->parent->do_userselected : 0, sts->parent ? sts->parent->do_this_entry : 0); - /* For recursive operation: If we should do the parent completely, - * we do the sub-entries, too. */ - if (opt_recursive>0) - sts->do_tree |= sts->parent->do_tree; - /* For semi-recursive operation: Do the child, if the parent was - * wanted. */ - if (opt_recursive>=0) - sts->do_this_entry |= sts->parent->do_tree | sts->do_tree; + return status; +} - DEBUGP("after parent: do_tree=%d.%d parent=%d.%d", - sts->do_tree, - sts->do_this_entry, - sts->parent ? sts->parent->do_tree : 0, - sts->parent ? sts->parent->do_this_entry : 0); +/** -. + * + * Calls \c ops__set_to_handle_bits() and maybe \c + * ops__update_single_entry(), and depending on the filter settings \c + * sts->do_this_entry might be cleared. + * */ +int ops__update_filter_set_bits(struct estat *sts) +{ + int status; + struct sstat_t stat; + + if (sts->parent) + STOPIF( ops__set_todo_bits(sts), NULL); + + if (sts->do_this_entry) + { + STOPIF( ops__update_single_entry(sts, &stat), NULL); + + if (ops__calc_filter_bit(sts)) + { + /* We'd have an invalid value if the entry is removed. */ + if ((sts->entry_status & FS_REPLACED) != FS_REMOVED) + if (!only_check_status) + sts->st = stat; + } + } + + DEBUGP("filter says %d", sts->do_filter_allows); + +ex: return status; } - /** -. * * We have to preserve the \c parent pointer and the \c name of \a dest. @@ -1432,9 +1495,8 @@ dest->path_len=0; dest->path_level=dest->parent->path_level+1; - /* The entry is not marked as FT_IGNORE ... that would change the entry - * type, and we have to save it anyway. */ - dest->entry_type=src->entry_type; + /* The entry is not marked as to-be-ignored ... that would change the + * entry type, and we have to save it anyway. 
*/ dest->entry_status=FS_NEW; dest->remote_status=FS_NEW; @@ -1442,7 +1504,7 @@ dest->decoder_is_correct=src->decoder_is_correct; dest->was_output=0; - dest->do_tree=dest->do_a_child=dest->do_this_entry=0; + dest->do_userselected = dest->do_child_wanted = dest->do_this_entry = 0; dest->arg=NULL; } @@ -1537,3 +1599,96 @@ return status; } + +/** -. + * The specified stream gets rewound, read up to \a max bytes (sane default + * for 0), and returned (zero-terminated) in \a *buffer allocated in \a + * pool. + * + * The real length can be seen via \a real_len. + * + * If \a filename is given, the file is removed. + * + * If \a pool is \c NULL, the space is \c malloc()ed and must be \c free()d + * by the caller. + * */ +/* mmap() might be a bit faster; but for securities' sake we put a \0 at + * the end, which might not be possible with a readonly mapping (although + * it should be, by using MAP_PRIVATE - but that isn't available with + * apr_mmap_create(), at least with 1.2.12). */ +int ops__read_special_entry(apr_file_t *a_stream, + char **data, + int max, ssize_t *real_len, + char *filename, + apr_pool_t *pool) +{ + int status; + apr_off_t special_len, bof; + apr_size_t len_read; + char *special_data; + + + status=0; + special_len=0; + + + /* Remove temporary file. Can be done here because we still have the + * handle open. */ + if (filename) + STOPIF_CODE_ERR( unlink(filename) == -1, errno, + "Cannot remove temporary file \"%s\"", filename); + + + /* Get length */ + STOPIF( apr_file_seek(a_stream, APR_CUR, &special_len), NULL); + + /* Some arbitrary limit ... */ + if (!max) max=8192; + STOPIF_CODE_ERR( special_len > max, E2BIG, + "!The special entry \"%s\" is too long (%llu bytes, max %llu).\n" + "Please contact the dev@ mailing list.", + filename, (t_ull)special_len, (t_ull)max); + + + /* Rewind */ + bof=0; + STOPIF( apr_file_seek(a_stream, APR_SET, &bof), NULL); + + special_data= pool ? + apr_palloc( pool, special_len+1) : + malloc(special_len+1); + STOPIF_ENOMEM(!special_data); + + + /* Read data. */ + len_read=special_len; + STOPIF( apr_file_read( a_stream, special_data, &len_read), NULL); + STOPIF_CODE_ERR( len_read != special_len, ENODATA, + "Reading was cut off at byte %llu of %llu", + (t_ull)len_read, (t_ull)special_len); + special_data[len_read]=0; + + DEBUGP("got special value %s", special_data); + + if (real_len) *real_len=special_len; + *data=special_data; + +ex: + return status; +} + + +/** -. + * */ +int ops__are_children_interesting(struct estat *dir) +{ + struct estat tmp; + + tmp.parent=dir; + tmp.do_this_entry = tmp.do_userselected = tmp.do_child_wanted = 0; + + ops___set_todo_bits(&tmp); + + return tmp.do_this_entry; +} + diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/est_ops.h /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/est_ops.h --- fsvs-1.1.14/src/est_ops.h 2008-01-24 06:57:54.000000000 +0000 +++ fsvs-1.1.17/src/est_ops.h 2008-10-25 12:11:00.000000000 +0100 @@ -12,6 +12,7 @@ #include "global.h" #include "waa.h" +#include "options.h" /** \file * Functions for handling of indiviual struct estats. */ @@ -22,8 +23,6 @@ struct estat *sts); /** Calculate the length of the path for this entry. */ int ops__calc_path_len(struct estat *sts); -/** Get the filetype from \c st->mode. */ -int ops___filetype(struct sstat_t *st); /** Compare the \c struct \c sstat_t , and set the \c entry_status. 
*/ int ops__stat_to_action(struct estat *sts, struct sstat_t *new); @@ -69,7 +68,9 @@ int ops__load_1entry(char **where, struct estat *sts, char **filename, ino_t *parent_i); /** Does a \c lstat() on the given entry, and sets the \c entry_status. */ -int ops__update_single_entry(struct estat *sts, char *fullpath); +int ops__update_single_entry(struct estat *sts, struct sstat_t *output); +/** Wrapper for \c ops__update_single_entry and some more. */ +int ops__update_filter_set_bits(struct estat *sts); /** Converts a string describing a special node to the \c struct \c sstat_t * data. */ @@ -80,6 +81,12 @@ /** See \c ops__dev_to_waa_string(), but uses a space character (\c \\x20 ) * for subversion compatibility. */ char *ops__dev_to_filedata(struct estat *sts); +/** Reads a file. */ +int ops__read_special_entry(apr_file_t *a_stream, + char **data, + int max, ssize_t *real_len, + char *filename, + apr_pool_t *pool); /** Reads a symlink and returns a pointer to its destination. */ int ops__link_to_string(struct estat *sts, char *filename, @@ -96,10 +103,33 @@ int flags, int sts_flags, struct estat **ret); -/** Set the \ref estat::do_tree and \ref estat::do_this_entry attributes - * depending on \ref opt_recursive and the parent's bits. */ -int ops__set_to_handle_bits(struct estat *sts); +/** Set the \ref estat::do_userselected and \ref estat::do_this_entry + * attributes depending on \ref opt_recursive and the parent's bits. */ +int ops__set_todo_bits(struct estat *sts); +/** Determines whether child entries of this entry should be done, based on + * the recursive settings and \a dir's todo-bits. */ +int ops__are_children_interesting(struct estat *dir); + +inline static int ops__allowed_by_filter(struct estat *sts) +{ +#ifdef ENABLE_DEBUG + BUG_ON(!sts->do_filter_allows_done, + "%s: do_filter_allows not done", sts->name); +#endif + return sts->do_filter_allows; +} + +inline static int ops__calc_filter_bit(struct estat *sts) +{ + sts->do_filter_allows_done=1; + + sts->do_filter_allows = + opt__get_int(OPT__FILTER) == FILTER__ALL || + /* or it's an interesting entry. */ + (sts->entry_status & opt__get_int(OPT__FILTER)); + return sts->do_filter_allows; +} /** Correlating entries from two directories \a dir_a and \a dir_B. * @{ */ @@ -117,5 +147,31 @@ ops__correlate_fn2_t for_every); /** @} */ + +/** Startstrings for links in the repository. 
*/ +extern const char link_spec[], + cdev_spec[], + bdev_spec[]; + +#define ops__mark_childchanged(start, field) \ +do { \ + register struct estat *_s=(start); \ + while (_s && !(_s->field & FS_CHILD_CHANGED)) \ + { \ + _s->field |= FS_CHILD_CHANGED; \ + _s=_s->parent; \ + } \ +} while (0) + +#define ops__mark_parent_cc(changed_entry, field) \ + ops__mark_childchanged(changed_entry->parent, field) + +#define ops__mark_changed_parentcc(changed_entry, field) \ +do { \ + changed_entry->field |= FS_CHANGED; \ + ops__mark_parent_cc(changed_entry, field); \ +} while (0) + + #endif diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/export.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/export.c --- fsvs-1.1.14/src/export.c 2008-02-19 05:57:01.000000000 +0000 +++ fsvs-1.1.17/src/export.c 2008-06-03 05:21:05.000000000 +0100 @@ -149,21 +149,15 @@ status_svn=NULL; current_url=url; - STOPIF( url__open_session(&session), NULL); + STOPIF( url__open_session(NULL), NULL); rev=url->target_rev; /* See the comment in update.c */ - if (rev == SVN_INVALID_REVNUM) - { - STOPIF_SVNERR( svn_ra_get_latest_revnum, - (session, &rev, global_pool)); - DEBUGP("HEAD is at %ld", rev); - } - + STOPIF( url__canonical_rev(current_url, &rev), NULL); /* export files */ STOPIF_SVNERR( svn_ra_do_update, - (session, + (current_url->session, &reporter, &report_baton, opt_target_revision, diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/fsvs.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/fsvs.c --- fsvs-1.1.14/src/fsvs.c 2008-04-02 06:25:13.000000000 +0100 +++ fsvs-1.1.17/src/fsvs.c 2008-10-25 12:12:13.000000000 +0100 @@ -40,10 +40,6 @@ * */ -/** \defgroup perf Performance points - * Some effort has been taken to get \c fsvs as fast as possible. - * */ - /** \defgroup add_unv_ign Adding and removing entries from versioning * * Normally all new entries are taken for versioning. @@ -90,11 +86,11 @@ * * \section cmds_au Defining which entries to take: *
- * <tr><td>\ref ignore<td>Define ignore patterns
+ * <tr><td>\ref ignore and \ref rign<td>Define ignore patterns
 * <tr><td>\ref unversion<td>Remove entries from versioning
 * <tr><td>\ref add<td>Add entries that would be ignored
 * <tr><td>\ref cp, \ref mv<td>Tell \c fsvs that entries were
- * copied.
+ * copied
 * </table>
 *
 * \section cmds_rep Commands working with the repository:
@@ -103,12 +99,14 @@
 * <tr><td>\ref update<td>Get updates from the repository
 * <tr><td>\ref checkout<td>Fetch some part of the repository, and
 * register it as working copy
- * <tr><td>\ref revert<td>Undo local changes
+ * <tr><td>\ref cat<td>Get a file from the directory
+ * <tr><td>\ref revert and \ref uncp<td>Undo local changes and
+ * entry markings
 * <tr><td>\ref remote-status<td>Ask what an \ref update
 * would bring
 * </table>
 *
- * \section cmds_prop Property handling
+ * \section cmds_prop Property handling:
 * <table>
 * <tr><td>\ref prop-set<td>Set user-defined properties
 * <tr><td>\ref prop-get<td>Ask value of user-defined properties
@@ -124,6 +122,10 @@
 *
 * \note Multi-url-operations are relatively new; there might be rough edges.
 *
+ *
+ * The return code is \c 0 for success, or \c 2 for an error.
+ * \c 1 is returned if the option \ref o_stop_change is used, and
+ * changes are found; see also \ref o_filter.
 *
 *
 * \section glob_opt Universal options
@@ -176,26 +178,8 @@
 *
 *
 * \subsection glob_opt_chksum -C -- checksum
- * \c -C increments the checksum flag.
- * Normally \a status tells that a file has \b possible modification, if
- * its mtime has changed but its size not.
- * Using \c -C you can tell the commands to be extra careful and \b always
- * check for modifications.
- *
- * The values are
- * <table>
- * <tr><td>0<td>Normal operations
- * <tr><td>1<td>Check files for modifications if possibly changed
- * <tr><td>2<td>Do an MD5 verification for all files, and check all
- *   directories for new entries.
- * </table>
    - * - * If a files size has changed, we can be sure that it's changed; - * a directory is checked for changes if any of its meta-data has changed - * (mtime, ctime, owner, group, size, mode). - * - * \note \a commit and \a update set the checksum flag to at least - * 1, to avoid missing changed files. + * \c -C chooses to use more change detection checks; please see \ref + * o_chcheck "the change_check option" for more details. * * * \subsection glob_opt_filter -f -- filter entries @@ -203,15 +187,14 @@ * operations, modification of the work done on given entries. * * It requires a specification at the end, which can be any combination of - * \c any, \c text, \c new, \c deleted, \c meta, \c mtime, \c group or \c - * owner. + * \c any, \c text, \c new, \c deleted (or \c removed), \c meta, \c mtime, \c group, \c mode, + * \c changed or \c owner. * * By giving eg. the value \c text, with a \ref status action only entries * that are new or changed are shown; with \c mtime,group only entries * whose group or modification time has changed are printed. * - * \note The list does not include \b possibly changed entries; see \ref - * glob_opt_chksum \c -C. + * \note Please see \ref o_chcheck for some more information. * * \note If an entry gets replaced with an entry of a different type (eg. a * directory gets replaced by a file), that counts as \c deleted \b and \c @@ -307,14 +290,14 @@ * simply a whitespace-separated list of option specifications. * * - * \subsection glob_opt_urls -u URLname[@revision] -- select URLs + * \subsection glob_opt_urls -u URLname[@revision[:revision]] -- select URLs * - * Some commands' operations can be reduced to a subset of defined URLs; - * the \ref update command is the best example. + * Some commands can be reduced to a subset of defined URLs; + * the \ref update command is a example. * - * If you have more than a single URL in use for your working copy, and \c - * update updates \b all entries from \b all URLs. By using this parameter - * you can tell FSVS to update only a single URL. + * If you have more than a single URL in use for your working copy, \c + * update normally updates \b all entries from \b all URLs. By using + * this parameter you can tell FSVS to update only the specified URLs. * * The parameter can be used repeatedly; the value can have multiple URLs, * separated by whitespace or one of \c ",;". @@ -322,9 +305,13 @@ * \code * fsvs up -u base_install,boot@32 -u gcc * \endcode + * * This would get \c HEAD of \c base_install and \c gcc, and set the target * revision of the \c boot URL at 32. * + * \note The second revision specification will be used for eg. the \ref + * diff command; but this is not yet implemented. + * * * \subsection glob_options -o [name[=value]] -- other options * This is used for setting some seldom used option, for which default can @@ -345,10 +332,8 @@ only_check_status=0, /** -. We start with recursive by default. */ opt_recursive=1, - opt_verbose=0, - opt_checksum=0; + opt_verbose=0; -svn_ra_session_t *session; svn_revnum_t target_revision; svn_revnum_t opt_target_revision=SVN_INVALID_REVNUM; svn_revnum_t opt_target_revision2=SVN_INVALID_REVNUM; @@ -475,7 +460,10 @@ * * In case the first character of the \a format is a "!", it's a * user error - here we normally print only the message, without the error - * code line. The full details are available via \c -d and \c -v. */ + * code line. The full details are available via \c -d and \c -v. 
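As an illustration of the "!" convention just described, here is a minimal sketch of raising such a user error. The function name \c wc__check_base() is made up; \c STOPIF_CODE_ERR() and the \c status / \c ex: layout are the ones documented in \c global.h further below. The leading "!" makes \c _STOP() print only the message, while the error code line and the remaining details stay reachable via \c -d and \c -v.

\code
#include <unistd.h>
#include <errno.h>
#include "global.h"

/* Hypothetical caller - only the "!" prefix in the format string matters. */
int wc__check_base(const char *path)
{
	int status;

	status=0;
	/* A user error: only the message is printed, no error code line. */
	STOPIF_CODE_ERR( access(path, R_OK | X_OK) == -1, errno,
			"!The working copy base \"%s\" is not accessible.", path);

ex:
	return status;
}
\endcode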
+ * + * \c -EPIPE is handled specially, in that it is passed up, but no message + * is printed. */ int _STOP(const char *file, int line, const char *function, int errl, const char *format, ...) { @@ -491,6 +479,7 @@ if (make_STOP_silent) return errl; + if (errl==-EPIPE) return errl; is_usererror= format && *format == '!'; if (is_usererror) format++; @@ -560,7 +549,7 @@ /** For keyword expansion - the version string. */ const char* Version(FILE *output) { - static const char Id[] ="$Id: fsvs.c 1578 2008-04-02 05:25:13Z pmarek $"; + static const char Id[] ="$Id: fsvs.c 1953 2008-10-25 11:12:13Z pmarek $"; fprintf(output, "FSVS (licensed under the GPLv3), (C) by Ph. Marek;" " version " FSVS_VERSION "\n"); @@ -650,7 +639,7 @@ int argc UNUSED, char *argv[]) { int status; - int i, hpos; + int i, hpos, len; char const* const*names; @@ -690,9 +679,10 @@ hpos=2; for(i=0; i= 75) + if (hpos+2+len >= 75) { printf("\n "); hpos=2; @@ -700,6 +690,7 @@ printf("%s%s", action_list[i].name[0], i+1 == action_list_count ? "\n" : ", "); + hpos += 2 + len; } puts( @@ -717,9 +708,9 @@ "Environment variables:\n" "\n" "$FSVS_CONF defines the location of the FSVS Configuration area\n" - " Default is /etc/fsvs, but any writeable directory is allowed.\n" + " Default is " DEFAULT_CONF_PATH ", but any writeable directory is allowed.\n" "$FSVS_WAA defines the location of the Working copy Administrative Area\n" - " Default is /var/spool/fsvs, but any writeable directory is allowed.\n" + " Default is " DEFAULT_WAA_PATH ", but any writeable directory is allowed.\n" ); } @@ -819,6 +810,7 @@ static char *charp_array_2[10]; static char **charpp; static char buffer[1024]; + static struct estat *estat_array[10]; int_array[0]=fileno(stdin); voidp_array[0]=stdin+fileno(stdout); @@ -831,6 +823,7 @@ case 9: return voidp_array; case 6: return buffer; case 2: return charp_array_1; + case 3: return estat_array; case 7: return charpp; case 8: return charp_array_2; } @@ -939,7 +932,7 @@ int status, help; char *cmd; svn_error_t *status_svn; - int eo_args; + int eo_args, i; void *mem_start, *mem_end; @@ -1017,7 +1010,6 @@ STOPIF( opt__load_env(environ), NULL); STOPIF( waa__save_cwd(&start_path, &start_path_len, 0), NULL); - STOPIF( wa__init(), NULL); if (!isatty(STDOUT_FILENO)) opt__set_int( OPT__STATUS_COLOR, PRIO_PRE_CMDLINE, 0); @@ -1028,8 +1020,12 @@ root.name=root.strings=strdup("."); root.st.size=0; root.st.mode=S_IFDIR | 0700; - root.entry_type=FT_DIR; root.entry_count=0; + /* The problem is that the root entry is never done explicitly; so we + * have to hard-code that here; but it is the default for all entries + * anyway. */ + root.do_filter_allows=1; + root.do_filter_allows_done=1; while (1) @@ -1072,9 +1068,27 @@ STOPIF( wa__set_warn_option(optarg, PRIO_CMDLINE), "Warning option '%s' is invalid", optarg); break; + case 'C': - opt_checksum++; + /* Find the rightmost 0 bit, and set it. */ + + i = opt__get_int(OPT__CHANGECHECK); + /* Algorithm for finding the rightmost 1 bit: + * orig i= ... x 0 1 1 1 + * XOR i+1 ... x 1 0 0 0 + * gives ... 0 1 1 1 1 + * AND i+1 ... 0 1 0 0 0 + * + * Maybe there's an easier way ... don't have "Numerical Recipes" + * here with me. 
*/ + i = (i ^ (i+1)) & (i+1); + + DEBUGP("checksum bits %X | %X", + opt__get_int(OPT__CHANGECHECK), i); + opt__set_int(OPT__CHANGECHECK, PRIO_CMDLINE, + opt__get_int(OPT__CHANGECHECK) | i); break; + case 'o': STOPIF( opt__parse( optarg, NULL, PRIO_CMDLINE, 0), "!Cannot parse option string '%s'.", optarg); @@ -1188,17 +1202,20 @@ action=action_list+0; } - DEBUGP("optind=%d per_sts=%d action=%s rec=%d", + DEBUGP("optind=%d per_sts=%d action=%s rec=%d filter=%s", optind, (int)sizeof(root), action->name[0], - opt_recursive); + opt_recursive, + st__status_string_fromint( opt__get_int(OPT__FILTER)) ); for(eo_args=1; eo_argswork(&root, argc-optind, args+optind), @@ -1246,8 +1262,10 @@ mem_end=sbrk(0); DEBUGP("memory stats: %p to %p, %llu KB", mem_start, mem_end, (t_ull)(mem_end-mem_start)/1024); - if (status) - return 1; + if (status == -EPIPE) + DEBUGP("got EPIPE, ignoring."); + else if (status) + return 2; _DEBUGP(NULL, 0, NULL, NULL); diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/global.h /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/global.h --- fsvs-1.1.14/src/global.h 2008-04-02 06:25:13.000000000 +0100 +++ fsvs-1.1.17/src/global.h 2008-10-29 07:19:09.000000000 +0000 @@ -84,11 +84,12 @@ /** Data storage for ignore patterns. */ struct ignore_t { /** The pattern string as given by the user, including flags. */ - char *pattern, - /** The calculated pattern string. - * Does no longer include the flags (like \e take), and shell syntax - * is converted to PCRE. */ - *compare_string; + char *pattern; + + /** The calculated pattern string. + * Does no longer include the flags (like \e take), and shell syntax + * is converted to PCRE. */ + char *compare_string; union { /* for shell and pcre */ @@ -111,7 +112,7 @@ unsigned short path_level; /** Flag telling whether this shell pattern has a \c ** in it. * \todo Would be used with ignore_t::path_level. */ - int has_wildwildcard:1; + unsigned int has_wildwildcard:1; }; /** For device compares */ @@ -136,17 +137,24 @@ }; }; - /** Is this an ignore or take pattern? - * \a 0 = take, \a 1 = ignore */ - int is_ignore:1; + /** AND-value for mode matching, or \c 0 for not chosen. */ + unsigned short mode_match_and; + /** CMP-value for mode matching. */ + unsigned short mode_match_cmp; + + /** Should this match only directories? */ + unsigned int dir_only:1; + /** Is this an ignore or take pattern? \a 0 = take, \a 1 = ignore */ + unsigned int is_ignore:1; /** Ignore case for comparing? */ - int is_icase:1; + unsigned int is_icase:1; /** Is it an \e internally generated pattern (for the WAA area)? * Internal patterns are not saved and not printed. */ - int is_user_pat:1; - /** Which type is this pattern? \see Pattern types. */ + unsigned int is_user_pat:1; + + /** Which type is this pattern? See \ref PatTypes. */ /* This is at the end because of alignment issues. */ - unsigned char type; + unsigned int type:3; }; /** Whether the device compare should be @@ -172,17 +180,22 @@ struct url_t { /** The URL itself (http:// or svn:// or similar) */ char *url; + /** The user-given priority; need not be unique. + * The lower the number, the higher the priority. */ + int priority; /** The length of the URL, not counting the \c \\0. */ int urllen; /** The revision we'd like that URL to be at - normally HEAD. */ - svn_revnum_t target_rev; + svn_revnum_t target_rev; + /** The revision the user gave for this command for this URL. + * Normally equals \c target_rev. */ + svn_revnum_t current_target_rev; /** The revision number this URL is currently at. 
*/ svn_revnum_t current_rev; + /** The \c HEAD revision, or \c SVN_INVALID_REVNUM if not yet known. */ + svn_revnum_t head_rev; /** The user-given symbolic name */ char *name; - /** The user-given priority; need not be unique. - * The lower the number, the higher the priority. */ - int priority; /** The number which is used in the dir-lists to reference this url_t. * Must be unique in the URL-list. * @@ -200,7 +213,9 @@ apr_pool_t *pool; /** Flag saying whether this URL should be done. * Should not be queried directly, but by using url__to_be_handled(). */ - int to_be_handled; + int to_be_handled:1; + /** Whether the user gave a specific override revision number. */ + int current_target_override:1; }; @@ -221,10 +236,21 @@ * no alignment problem (which would arise if there was eg. a 16bit type, * then space would be wasted) */ struct sstat_t { - /** Device number */ - dev_t dev; - /** Inode */ - ino_t ino; + /* For easier comparison, we overlay an 64bit type. */ + union { + /** The modification time as \c seconds, \c microseconds. */ + struct timespec mtim; + /** The same value in a single integer value. + * \deprecated Currently unused. */ + unsigned long long _mtime; + }; + union { + /** The creation time as \c seconds, \c microseconds. */ + struct timespec ctim; + /** The same value in a single integer value. + * \deprecated Currently unused. */ + unsigned long long _ctime; + }; union { /** The size in bytes (for files, symlinks and directories). */ @@ -233,6 +259,11 @@ dev_t rdev; }; + /** Device number of \b host filesystem. */ + dev_t dev; + /** Inode */ + ino_t ino; + /** The access mode (like \c 0700, \c 0755) with all other (non-mode) * bits, ie S_IFDIR. */ mode_t mode; @@ -241,22 +272,6 @@ uid_t uid; /** The group number. */ gid_t gid; - - /* For easier comparison, we overlay an 64bit type. */ - union { - /** The modification time as \c seconds, \c microseconds. */ - struct timespec mtim; - /** The same value in a single integer value. - * \deprecated Currently unused. */ - unsigned long long _mtime; - }; - union { - /** The creation time as \c seconds, \c microseconds. */ - struct timespec ctim; - /** The same value in a single integer value. - * \deprecated Currently unused. */ - unsigned long long _ctime; - }; }; @@ -266,52 +281,62 @@ * The name comes from extended struct stat. * This structure is used to build the tree of entries that we're processing. * - * \note Some moving of members may be necessary to get optimal alignment. */ + * We need both a local and a remote status, to see on update when there + * might be a conflict. \todo Single status, and check entries on-time? + */ struct estat { - /** Meta-data of this entry. */ - struct sstat_t st; - + /** The parent of this entry, used for tree walking. + * Must be \c NULL for the root entry and the root entry alone. */ + struct estat *parent; /** Name of this entry. */ char *name; + /** Meta-data of this entry. */ + struct sstat_t st; + /** Revision of this entry. Currently only the value in the root entry is * used; this will be moved to \c * \ref url and removed from here. */ svn_revnum_t repos_rev; /** The revision number before updating. */ svn_revnum_t old_rev; - /** The parent of this entry, used for tree walking. - * Must be \c NULL for the root entry and the root entry alone. */ - struct estat *parent; - /** The URL this entry is from. * Will be used for multi-url updates. 
*/ struct url_t *url; + /** If an entry gets removed, the old version is remembered (if needed) + * via the \c old pointer (eg to know which children were known and may + * be safely removed). */ + struct estat *old; + /** Data about this entry. */ - struct { + union { /** For files */ struct { - /** MD5-hash of the repository version. - * While committing it is set to the \e new MD5, and saved - * with \a waa__output_tree(). */ - md5_digest_t md5; - /** Flag whether this entry has changed or not changed - * (as per MD5/manber-compare), or if this is unknown yet. - * See \ref ChgFlag. */ - int change_flag; /** The decoder string from fsvs:update-pipe. Only gets set if * action->needs_decoder != 0. */ char *decoder; + /** MD5-hash of the repository version. While committing it is set + * to the \e new MD5, and saved with \a waa__output_tree(). */ + md5_digest_t md5; /** Whether we got an "original" MD5 from the repository to compare. * */ - int has_orig_md5:1; + unsigned int has_orig_md5:1; + /** Flag whether this entry has changed or not changed (as per + * MD5/manber-compare), or if this is unknown yet. + * See \ref ChgFlag. */ + unsigned int change_flag:2; }; - /** For directories */ + /** For directories. + * The by_inode and by_name members are positioned so that they collide + * with the \c md5 file member above - in case of incorrect file types + * that's safer, as they'll contain invalid pointers instead of (the + * valid) \c decoder. */ struct { - /** How many entries this directory has. */ - unsigned entry_count; - + /** Name storage space for sub- (and sub-sub- etc.) entries. + * Mainly used in the root inode, but is used in newly found directories + * too. \c NULL for most directory entries. */ + char *strings; /** List of child entries. * Sorted by inode number, NULL-terminated. * Always valid. */ @@ -321,17 +346,24 @@ * May be NULL if not currently used; can be (re-)generated by calling * dir__sortbyname(). */ struct estat **by_name; - /** Name storage space for sub- (and sub-sub- etc.) entries. - * Mainly used in the root inode, but is used in newly found directories - * too. \c NULL for most directory entries. */ - char *strings; + + /** How many entries this directory has. */ + AC_CV_C_UINT32_T entry_count; + + /** Used to know when this directories' children are finished. + * Counts the number of unfinished subdirectories. + * This is volatile and should be in the union below (with \ref + * estat::child_index), but as it's only used for directories it + * conserves memory to keep it here. */ + AC_CV_C_UINT32_T unfinished; + /** This flag is set if any child is *not* at the same revision, * so this directory has to be descended on reporting. */ - int other_revs:1; + unsigned int other_revs:1; /** If this bit is set, the directory has to be re-sorted before * being written out -- it may have new entries, which are not in * the correct order. */ - int to_be_sorted:1; + unsigned int to_be_sorted:1; /* Currently unused - see ignore.c. */ #if 0 struct ignore_t **active_ign; @@ -347,46 +379,52 @@ /** This entries' baton. */ void *baton; }; - /** Update for a file. */ + /** Export for a file. */ struct { /** The pool used for the filehandles; for a discussion see \ref FHP. */ apr_pool_t *filehandle_pool; }; - /** Update for a special entry. */ + /** Export of a special entry. */ struct { /** String-buffers for special entries. * While a file is \b always streamed to disk, special entries are * \b always done in memory. 
*/ - svn_stringbuf_t *stringbuf_src, - *stringbuf_tgt; + svn_stringbuf_t *stringbuf_tgt; }; struct { /** Used in waa__input_tree() and waa__update_tree(). */ - unsigned child_index; - /** Local pool while updating a directory. - * For the children in a directory we have to create subpools in \ref - * up__apply_textdelta(); the local \c pool argument is too - * short-lived, they have to have at least the lifetime of their - * parent directories. */ - apr_pool_t *dir_pool; + AC_CV_C_UINT32_T child_index; }; /* output_tree */ struct { - unsigned file_index; + AC_CV_C_UINT32_T file_index; }; }; /** \name Common variables for all types of entries. */ - /** Flags for this entry. See \ref EntFlags for constant definitions. */ - int flags; + /** Which argument causes this path to be done. */ + char *arg; - /** Length of path up to here. Does not include the \c \\0. See \ref - * ops__calc_path_len. */ - unsigned short path_len:14; + /** Stored user-defined properties as \c name=>svn_string_t, if \c + * action->keep_user_prop is set. + * Allocated in a subpool of \c estat::url->pool, so that it's still + * available after cb__record_changes() returns. + * The subpool is available from a hash lookup with key "" (len=0). */ + apr_hash_t *user_prop; + + /** Updated unix mode from ops__update_single_entry(). + * See the special \ref fsvsS_constants below. + * \todo Strip that to only the needed bits, ie. the ones (used) in \c + * S_IFMT, and make estat compact, by putting between the bitfields. + * Convention: has \b always to be set according to the current type of + * the entry (to account for the shared members, eg. by_inode); where + * estat::st.mode has the \e original value, as seen by the repository. */ + mode_t updated_mode; + + /** Flags for this entry. See \ref EntFlags for constant definitions. */ + AC_CV_C_UINT32_T flags; - /** What kind of entry this is - see \ref ent_types. */ - unsigned int entry_type:6; /** Local status of this entry - \ref fs_bits. */ unsigned int entry_status:10; @@ -398,32 +436,58 @@ * value here has range of [1 .. MAX_CACHED_PATHS] instead of * the usual [0 .. MAX_CACHED_PATHS-1]. */ unsigned int cache_index:6; + + /** Length of path up to here. Does not include the \c \\0. See \ref + * ops__calc_path_len. */ + unsigned short path_len:16; + /** At which level is this path? The wc root is at level 0, its children * at 1, and so on. */ unsigned short path_level:9; /** Whether this entry was already printed. \todo Remove by changing the * logic. */ - int was_output:1; + unsigned int was_output:1; /** This flag tells whether the string for the decoder is surely correct. * It is currently used for updates; after we parse the properties in * cb__record_changes(), we'll have the correct value. */ - int decoder_is_correct:1; + unsigned int decoder_is_correct:1; /** Flag saying whether this entry was specified by the user on the * command line. */ - int do_tree:1; - /** Like \a FS_CHILD_CHANGED - children of this entry must be handled. - * \todo substitute by \a FS_CHILD_CHANGED? */ - int do_a_child:1; - /** Flag derived from parents' \ref estat::do_tree. + unsigned int do_userselected:1; + /** Says that a child of this entry was given by the user on the + * commandline. + * Unlike \a FS_CHILD_CHANGED, which is set if some child has \e actually + * changed, this just says that we have to check. */ + unsigned int do_child_wanted:1; + /** Flag derived from parents' \ref estat::do_userselected. * Set for \b all entries which should be handled. 
*/ - int do_this_entry:1; - - /** Which argument causes this path to be done. */ - char *arg; + unsigned int do_this_entry:1; + /** Flag saying whether the \c "-f" filter condition applies. + * Normally set in \ref ops__set_todo_bits(), can be cleared in \ref + * ops__update_filter_set_bits(). */ + unsigned int do_filter_allows:1; + /** Flag used for debugging. If estat::do_filter_allows is queried + * without being defined earlier, we trigger a \ref BUG(). + * Was conditionalized on \c ENABLE_DEBUG - but that got ugly. */ + unsigned int do_filter_allows_done:1; + + /** Whether this entry should not be written into the \ref dir "entry + * list", and/or ignored otherwise. */ + unsigned int to_be_ignored:1; }; +/** \anchor fsvsS_constants Special FSVS file type constants. + * @{ */ +#define S_IFUNDEF (0) +/** All sockets get filtered out when the directory gets read, so we can + * safely reuse that value for the case where we don't know \b what kind of + * special entry that is (eg when receiving \c "svn:special" from the + * repository). */ +#define S_IFANYSPECIAL S_IFSOCK +#define S_ISANYSPECIAL S_ISSOCK +/** @} */ /** \anchor EntFlags Various flags for entries. * @@ -471,22 +535,6 @@ #define RF___IS_COPY (RF_COPY_BASE | RF_COPY_SUB) -/** \anchor ent_types Entry types. - * Historically derived from filetypes. */ -#define FT_UNKNOWN (0) -#define FT_DIR (1) -#define FT_FILE (2) -#define FT_CDEV (4) -#define FT_BDEV (8) -#define FT_SYMLINK (16) -// sockets ? -#define FT_IGNORE (32) -#define FT_ANY (FT_DIR | FT_FILE | FT_CDEV | FT_BDEV | FT_SYMLINK | FT_IGNORE) -#define FT_NONDIR (FT_ANY & ~FT_DIR) -#define FT_ANYSPECIAL (FT_NONDIR & ~FT_FILE) - -#define FT__MASK (FT_ANY) - /** \name File statii. * \anchor fs_bits @@ -518,8 +566,8 @@ FS_META_GROUP | FS_META_UMODE) /** This flag on a directory entry says that the directory itself was - * not changed, but some child, so this directories children have to be - * checked for modifications. */ + * not changed, but some child, so the children of this directory + * have to be checked for modifications. */ #define FS_CHILD_CHANGED (1 << 9) #define FS__CHANGE_MASK (FS_NEW | FS_REMOVED | FS_CHANGED | \ @@ -563,42 +611,42 @@ /** Declaration of the debug function. */ extern void _DEBUGP(const char *file, int line, const char *func, char *format, ...) __attribute__ ((format (printf, 4, 5) )); -/** The macro used for printing debug messages. - * Includes time, file, line number and function name. - * Allows filtering via opt_debugprefix. - * \note Check for \ref PrintfTypes "argument sizes". */ + /** The macro used for printing debug messages. + * Includes time, file, line number and function name. + * Allows filtering via opt_debugprefix. + * \note Check for \ref PrintfTypes "argument sizes". */ #define DEBUGP(...) _DEBUGP(__FILE__, __LINE__, __PRETTY_FUNCTION__, __VA_ARGS__) #endif -/** \name Error-printing and -handling functions. - * - * Except for the subversion-library wrapper macros they need exactly this - * function layout: - * - * \code - * int some_function( ... some args ... ) - * { - * int status; - * - * STOPIF( function_call(1, 2, "a"), - * "String describing the error situation with %s", - * "parameters as needed"); - * - * ex: - * cleanups(); - * return status; - * } - * \endcode - * - * It works by checking the return value; if it is not zero, a - * goto ex is done. At this mark some cleanup is possible. */ -/** @{ */ -/** A flag to turn error printing temporarily off. 
- * This is useful where entire calltrees would have to be equipped with - * some \c silent parameter. */ -extern int make_STOP_silent; -/** Master error function. */ + /** \name Error-printing and -handling functions. + * + * Except for the subversion-library wrapper macros they need exactly this + * function layout: + * + * \code + * int some_function( ... some args ... ) + * { + * int status; + * + * STOPIF( function_call(1, 2, "a"), + * "String describing the error situation with %s", + * "parameters as needed"); + * + * ex: + * cleanups(); + * return status; + * } + * \endcode + * + * It works by checking the return value; if it is not zero, a + * goto ex is done. At this mark some cleanup is possible. */ + /** @{ */ + /** A flag to turn error printing temporarily off. + * This is useful where entire calltrees would have to be equipped with + * some \c silent parameter. */ + extern int make_STOP_silent; + /** Master error function. */ extern int _STOP(const char *file, int line, const char *function, int errl, const char *format, ...) __attribute__ ((format (printf, 5, 6) )); @@ -637,14 +685,14 @@ * \param code The status code to check. * All other things are hardcoded. */ #define STOPIF(code, ... ) \ - do \ + do \ { \ - status=code; \ - if (status) \ - { \ + status=(code); \ + if (status) \ + { \ _STOP(__FILE__, __LINE__, __PRETTY_FUNCTION__, status, __VA_ARGS__); \ goto ex; \ - } \ + } \ } while (0) /** A simplified error call macro for returning ENOMEM. * \code @@ -655,6 +703,21 @@ * \endcode * */ #define STOPIF_ENOMEM(cond) STOPIF_CODE_ERR(cond, ENOMEM, NULL) +/** An error return macro that is used for user output - special handling + * \c EPIPE to get a silent return. + * If \c code returns something negative (like printf, puts, putc ... do; + * \c EOF is defined as \c -1), and \a error is \c EPIPE, go on with \c + * -EPIPE. */ +#define STOPIF_CODE_EPIPE(code, ...) \ + do \ +{ \ + if ((code) < 0) \ + { \ + status=errno; \ + if (status == EPIPE) status= -EPIPE; \ + STOPIF(status, "Error writing output"); \ + } \ +} while (0) /** \page svnlibwrap Subversion library calls wrapper. * If this is used in some function, an additional variable is needed: @@ -676,18 +739,12 @@ * \endcode */ /** The master error macro for calling subversion functions. */ -#define STOPIF_SVNERR_EXTRA(func, parm, fmt, ...)\ - do \ -{ \ - status_svn=func parm; \ - if (status_svn) \ - { \ - status=status_svn->apr_err; \ - _STOP(__FILE__, __LINE__, __PRETTY_FUNCTION__, \ - status_svn->apr_err, \ - fmt ": %s", ## __VA_ARGS__, status_svn->message);\ - goto ex; \ - } \ +#define STOPIF_SVNERR_TEXT(func, parm, fmt, ...) \ + do \ +{ \ + status_svn=func parm; \ + STOPIF_CODE_ERR( status_svn, status_svn->apr_err, \ + fmt ": %s", ## __VA_ARGS__, status_svn->message); \ } while (0) /* The mainly used function wrapper. * \param func Name of the subversion function @@ -696,10 +753,7 @@ * STOPIF_SVNERR( svn_ra_initialize, (global_pool)); * \endcode */ -#define STOPIF_SVNERR(func, parm) STOPIF_SVNERR_TEXT(func, #func, parm) -/** The same as STOPIF_SVNERR(), but with a variable printed function name. - * Used in case the function is called indirectly; see STOPIF_SVNERR_INDIR. */ -#define STOPIF_SVNERR_TEXT(func, text, parm) STOPIF_SVNERR_EXTRA(func, parm, text) +#define STOPIF_SVNERR(func, parm) STOPIF_SVNERR_TEXT(func, parm, #func) /** Convert the svn_error_t into a message and a returnable integer. 
*/ #define STOP_HANDLE_SVNERR(svnerr) STOPIF_CODE_ERR_GOTO(svnerr, svnerr->apr_err, ex2, (const char*)svnerr->message) @@ -717,7 +771,7 @@ /** Makes the program abort. * If the configure had --enable-debug and \c gdb is in the path, try * to use \c gdb to debug this problem (only if STDIN and STDOUT are ttys). */ -#define BUG(...) do { debuglevel=1; DEBUGP(__VA_ARGS__); *(int*)42=__LINE__; } while (0) +#define BUG(...) do { fflush(NULL); debuglevel=1; DEBUGP(__VA_ARGS__); *(int*)42=__LINE__; } while (0) /** The same as BUG(), but conditionalized. * \code * BUG_ON(a == b, "HELP") @@ -759,97 +813,93 @@ * A list of variables that can be set by commandline parameters or * environment variables; these are used in nearly every action. */ /** @{ */ -/** If the user wants to make sure whether files have been modified - * (and the modification time check is not enough). - * Makes checking slower, as data gets MD5ed. */ -extern int opt_checksum, - /** Greater than zero if additional details are wanted, - * or negative for extra quiet operation. */ - opt_verbose, - /** Flag for recursive/non-recursive behaviour. - * Starting with 0, gets incremented with \c -R and decremented with \c - * -N. Different actions have different default levels. */ - opt_recursive, - /** If this is an import/export command (eg restoration after harddisk - * crash), we don't use the WAA for data storage. */ - is_import_export, - /** Flag saying whether the local update should only set the entry_status - * of existing entries and not check for new ones. Needed for update. */ - only_check_status, - /** Whether debug messages are wanted. */ - debuglevel; - -/** A pointer to the commit message; possibly a mmap()ped file. */ -extern char *opt_commitmsg, - /** The file name of the commit message file. */ - *opt_commitmsgfile; - -/** The revision we're getting from the repository. */ -extern svn_revnum_t target_revision; -/** The revision the user wants to get at (\c -r parameter). -* \c HEAD is represented by \c SVN_INVALID_REVNUM. -* Has to be splitted per-URL when we're going to multi-url operation. */ -extern svn_revnum_t opt_target_revision; -/** The second revision number the user specified. */ -extern svn_revnum_t opt_target_revision2; -/** How many revisions the user specified on the commandline (0, 1 or 2). -* For multi-update operations it's possibly to update the urls to different -* revisions; then we need to know for which urls the user specified a -* revision number. Per default we go to \c HEAD. -* */ -extern int opt_target_revisions_given; +/** Greater than zero if additional details are wanted, or negative for + * extra quiet operation. */ +extern int opt_verbose, + /** Flag for recursive/non-recursive behaviour. + * Starting with 0, gets incremented with \c -R and decremented with \c + * -N. Different actions have different default levels. */ + opt_recursive, + /** If this is an import/export command (eg restoration after harddisk + * crash), we don't use the WAA for data storage. */ + is_import_export, + /** Flag saying whether the local update should only set the entry_status + * of existing entries and not check for new ones. Needed for update. */ + only_check_status, + /** Whether debug messages are wanted. */ + debuglevel; + + /** A pointer to the commit message; possibly a mmap()ped file. */ + extern char *opt_commitmsg, + /** The file name of the commit message file. */ + *opt_commitmsgfile; + + /** The revision we're getting from the repository. 
*/ + extern svn_revnum_t target_revision; + /** The revision the user wants to get at (\c -r parameter). + * \c HEAD is represented by \c SVN_INVALID_REVNUM. + * Has to be splitted per-URL when we're going to multi-url operation. */ + extern svn_revnum_t opt_target_revision; + /** The second revision number the user specified. */ + extern svn_revnum_t opt_target_revision2; + /** How many revisions the user specified on the commandline (0, 1 or 2). + * For multi-update operations it's possibly to update the urls to different + * revisions; then we need to know for which urls the user specified a + * revision number. Per default we go to \c HEAD. + * */ + extern int opt_target_revisions_given; -/** The local character encoding, according to \a LC_ALL or \a LC_CTYPE) */ + /** The local character encoding, according to \a LC_ALL or \a LC_CTYPE) */ #ifdef HAVE_LOCALES -extern char *local_codeset; + extern char *local_codeset; #endif -/** The session handle for RA operations. */ -extern svn_ra_session_t *session; + /** The session handle for RA operations. */ + extern svn_ra_session_t *session; -/** The first allocated APR pool. All others are derived from it and its -* children. */ -extern apr_pool_t *global_pool; - -/** The array of URLs. */ -extern struct url_t **urllist; -/** Number of URLs we have. */ -extern int urllist_count; -/** Pointer to \b current URL. */ -extern struct url_t *current_url; - -extern unsigned approx_entry_count; -/** @} */ - - -extern char propname_mtime[], - /** Modification time - \c svn:owner */ - propname_owner[], - /** Modification time - \c svn:group */ - propname_group[], - /** Modification time - \c svn:unix-mode */ - propname_umode[], - /** Original MD5 for encoded entries. */ - propname_origmd5[], - /** Flag for special entry. */ - propname_special[], - /** The value for the special property; normally \c "*". */ - propval_special[], - - /** Commit-pipe program. */ - propval_commitpipe[], - /** Update-pipe program. */ - propval_updatepipe[]; - - -/** \addtogroup cmds_strings Common command line strings - * \ingroup compat - * - * These strings may have to be localized some time, that's why they're - * defined in this place. */ -/** @{ */ -extern char parm_dump[], - parm_load[]; + /** The first allocated APR pool. All others are derived from it and its + * children. */ + extern apr_pool_t *global_pool; + + /** The array of URLs. */ + extern struct url_t **urllist; + /** Number of URLs we have. */ + extern int urllist_count; + /** Pointer to \b current URL. */ + extern struct url_t *current_url; + + extern unsigned approx_entry_count; + /** @} */ + + + extern char propname_mtime[], + /** Modification time - \c svn:owner */ + propname_owner[], + /** Modification time - \c svn:group */ + propname_group[], + /** Modification time - \c svn:unix-mode */ + propname_umode[], + /** Original MD5 for encoded entries. */ + propname_origmd5[], + /** Flag for special entry. */ + propname_special[], + /** The value for the special property; normally \c "*". */ + propval_special[], + + /** Commit-pipe program. */ + propval_commitpipe[], + /** Update-pipe program. */ + propval_updatepipe[]; + + + /** \addtogroup cmds_strings Common command line strings + * \ingroup compat + * + * These strings may have to be localized some time, that's why they're + * defined in this place. */ + /** @{ */ + extern char parm_dump[], + parm_load[]; /** @} */ /** Remember where we started. 
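The do_filter_allows/do_filter_allows_done bits added near the top of this header are documented to trigger a BUG() when queried before being set; a minimal sketch of such a guarded accessor (the name is invented, not taken from the patch):
\code
/* Hypothetical accessor; the real code may differ. */
static inline int filter_allows(const struct estat *sts)
{
	/* Reading the flag before ops__set_todo_bits() ran is a program error. */
	BUG_ON(!sts->do_filter_allows_done,
			"do_filter_allows queried before being set");
	return sts->do_filter_allows;
}
\endcode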
*/ diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/hash_ops.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/hash_ops.c --- fsvs-1.1.14/src/hash_ops.c 2008-02-21 06:15:16.000000000 +0000 +++ fsvs-1.1.17/src/hash_ops.c 2008-06-04 07:16:25.000000000 +0100 @@ -84,102 +84,102 @@ * { * rank=same; * key; -* 1; -* } -* key -> C1; -* 1 -> C0; -* -* edge [style=dotted]; -* edge [arrowhead=none, arrowtail=normal]; -* 1 -> C1:p; -* -* edge [style=invis, weight=20]; -* key -> 1; -* } -* \enddot -* After several insertions, the situation might be like this: -* \dot -* digraph { - * node [shape=record]; - * { - * rank=same; - * Ca [label = " {

 <p> 0 | value_A } " ];
- * Cb [label = " {<p> 0 | value_B2 } " ];
- * Cb2 [label = " {<p> 1 | value_B1 } " ];
- * Cc3 [label = " {<p> 3 | value_C3 } " ];
- * Cc2 [label = " {<p> 2 | value_C2 } " ];
- * Cc [label = " {<p>
    0 | value_C1 } " ]; - * } - * { - * rank=same; - * key_A; - * key_B; - * key_C; - * 1; - * 2; - * 3; - * } - * - * "key_A" -> Ca; - * - * "key_B" -> Cb2; - * "1" -> Cb; - * - * "key_C" -> Cc3; - * "2" -> Cc; - * "3" -> Cc2; - * - * edge [style=dotted]; - * edge [arrowhead=none, arrowtail=normal]; - * 1 -> Cb2:p; - * 3 -> Cc3:p; - * 2 -> Cc2:p; - * - * edge [style=invis, weight=20]; - * key_A -> key_B; - * 1 -> key_C; - * } - * \enddot - * - * - * \subsection hsh_store_array Storing a verbatim array - * - * If there's a limited number of entries (with known length) to store, an - * array with a defined size might be easiest. \n - * A similar variant would be to simply concatenate the data in the hash - * buckets, with some suitable separator. - * - memory intensive, slow for big buckets (many bytes to copy). - * - For array iteration some special convention for the \c key would - * have to be used, like \c .dsize=0 and \c .dptr=array_def; the last - * returned index would have to be stored in the array structure. - * - Big advantage: fast for reading, doesn't have to seek around. - * \dot - * digraph { - * node [shape=record]; - * C1 [label = "num=4 | v1 | v2 | v3 | v4 | 0 | 0 | 0 | 0 | 0 | 0 | 0" ]; - * "key" -> C1; - * } - * \enddot - * - * - * \subsection Conclusio - * - * Barring other (better) ideas, the array solution is currently - * implemented; the array is of fixed-size, can store only pointers, and - * the function for getting a list allows returning a set of elements. - * - *


    - * */ - - - /** \name Simple hash functions. - * - * @{ */ - - /** Bare open function for internal use. - * - * \a *fname_out, if not \c NULL, gets an allocated copy of the filename. - * */ + * 1; + * } + * key -> C1; + * 1 -> C0; + * + * edge [style=dotted]; + * edge [arrowhead=none, arrowtail=normal]; + * 1 -> C1:p; + * + * edge [style=invis, weight=20]; + * key -> 1; + * } + * \enddot + * After several insertions, the situation might be like this: + * \dot + * digraph { + * node [shape=record]; + * { + * rank=same; + * Ca [label = " {

 <p> 0 | value_A } " ];
+ * Cb [label = " {<p> 0 | value_B2 } " ];
+ * Cb2 [label = " {<p> 1 | value_B1 } " ];
+ * Cc3 [label = " {<p> 3 | value_C3 } " ];
+ * Cc2 [label = " {<p> 2 | value_C2 } " ];
+ * Cc [label = " {<p>
    0 | value_C1 } " ]; + * } + * { + * rank=same; + * key_A; + * key_B; + * key_C; + * 1; + * 2; + * 3; + * } + * + * "key_A" -> Ca; + * + * "key_B" -> Cb2; + * "1" -> Cb; + * + * "key_C" -> Cc3; + * "2" -> Cc; + * "3" -> Cc2; + * + * edge [style=dotted]; + * edge [arrowhead=none, arrowtail=normal]; + * 1 -> Cb2:p; + * 3 -> Cc3:p; + * 2 -> Cc2:p; + * + * edge [style=invis, weight=20]; + * key_A -> key_B; + * 1 -> key_C; + * } + * \enddot + * + * + * \subsection hsh_store_array Storing a verbatim array + * + * If there's a limited number of entries (with known length) to store, an + * array with a defined size might be easiest. \n + * A similar variant would be to simply concatenate the data in the hash + * buckets, with some suitable separator. + * - memory intensive, slow for big buckets (many bytes to copy). + * - For array iteration some special convention for the \c key would + * have to be used, like \c .dsize=0 and \c .dptr=array_def; the last + * returned index would have to be stored in the array structure. + * - Big advantage: fast for reading, doesn't have to seek around. + * \dot + * digraph { + * node [shape=record]; + * C1 [label = "num=4 | v1 | v2 | v3 | v4 | 0 | 0 | 0 | 0 | 0 | 0 | 0" ]; + * "key" -> C1; + * } + * \enddot + * + * + * \subsection Conclusio + * + * Barring other (better) ideas, the array solution is currently + * implemented; the array is of fixed-size, can store only pointers, and + * the function for getting a list allows returning a set of elements. + * + *


    + * */ + + +/** \name Simple hash functions. + * + * @{ */ + +/** Bare open function for internal use. + * + * \a *fname_out, if not \c NULL, gets an allocated copy of the filename. + * */ int hsh___new_bare(char *wcfile, char *name, int gdbm_mode, GDBM_FILE *output, char **fname_out) @@ -280,6 +280,45 @@ /** -. + * + * The previously marked keys in the hash table are removed; it is not + * checked for empty-ness nor reorganized. */ +int hsh__collect_garbage(hash_t db, int *did_remove) +{ + int status; + int have_removed; + datum key, next; + + status=0; + have_removed=0; + if (db && db->to_delete) + { + key=gdbm_firstkey(db->to_delete); + while (key.dptr) + { + next=gdbm_nextkey(db->to_delete, key); + STOPIF_CODE_ERR( gdbm_delete(db->db, key)!=0, gdbm_errno, + "Removing entry"); + + free(key.dptr); + key=next; + have_removed++; + } + + DEBUGP("%d cleanups", have_removed); + + gdbm_close(db->to_delete); + db->to_delete=NULL; + } + + if (did_remove) *did_remove=have_removed; + +ex: + return status; +} + + +/** -. * * If \a has_failed is set, some error has happened, and the registered * keys are not used for deletion (like a \c ROLLBACK). */ @@ -287,50 +326,33 @@ { int status; int have_removed; - datum key, next; + datum key; status=0; if (!db) goto ex; + have_removed=0; if (db->to_delete) { - have_removed=0; if (!has_failed) - { - key=gdbm_firstkey(db->to_delete); - while (key.dptr) - { - next=gdbm_nextkey(db->to_delete, key); - STOPIF_CODE_ERR( gdbm_delete(db->db, key)!=0, gdbm_errno, - "Removing entry"); - - free(key.dptr); - key=next; - have_removed++; - } - } - - DEBUGP("%d cleanups", have_removed); - - gdbm_close(db->to_delete); - db->to_delete=NULL; + STOPIF( hsh__collect_garbage(db, &have_removed), NULL); + } - /* No more data in that hash? */ - if (hsh__first(db, &key) == ENOENT && - db->filename) - { - DEBUGP("nothing found, removing %s", db->filename); - STOPIF_CODE_ERR( unlink(db->filename)==-1, errno, - "Cleaning up the empty hash '%s'", db->filename); - } - else - { - DEBUGP("reorganize?"); - /* At least fewer space used? */ - if (have_removed) - gdbm_reorganize(db->db); - } + /* No more data in that hash? */ + if (hsh__first(db, &key) == ENOENT && + db->filename) + { + DEBUGP("nothing found, removing %s", db->filename); + STOPIF( waa__delete_byext(db->filename, NULL, 0), + "Cleaning up the empty hash '%s'", db->filename); + } + else + { + DEBUGP("reorganize?"); + /* At least fewer space used? */ + if (have_removed) + gdbm_reorganize(db->db); } DEBUGP("closing hash"); @@ -388,13 +410,16 @@ { datum k; + /* Get next key. */ k=gdbm_nextkey(db->db, *oldkey); + /* Ev. free old key-data. */ if (oldkey == key) /* Should be IF_FREE(oldkey->dptr) -- but oldkey==key, and oldkey is * read-only. */ IF_FREE(key->dptr); + /* Return new key. */ *key=k; return (k.dptr) ? 0 : ENOENT; diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/hash_ops.h /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/hash_ops.h --- fsvs-1.1.14/src/hash_ops.h 2008-02-19 05:57:01.000000000 +0000 +++ fsvs-1.1.17/src/hash_ops.h 2008-05-16 05:45:04.000000000 +0100 @@ -96,6 +96,8 @@ /** Close a property file. */ int hsh__close(hash_t db, int has_failed); +/** Collect garbage in the hash table. 
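The "verbatim array" storage scheme discussed above can be pictured as a small C struct; this is only an illustration of the "num=4 | v1 | ..." record in the dot diagram (slot count and names are invented), not the actual on-disk layout.
\code
/* Illustration only: a counted, fixed-size array of value pointers. */
#define BUCKET_SLOTS 11
struct value_array {
	int num;                      /* number of used slots ("num=4") */
	void *values[BUCKET_SLOTS];   /* v1..vN, remaining slots stay 0 */
};
\endcode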
*/ +int hsh__collect_garbage(hash_t db, int *did_remove); /** @} */ diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/helper.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/helper.c --- fsvs-1.1.14/src/helper.c 2008-03-10 07:58:13.000000000 +0000 +++ fsvs-1.1.17/src/helper.c 2008-10-25 12:09:08.000000000 +0100 @@ -19,6 +19,7 @@ #include #include #include +#include #include "global.h" #include "waa.h" @@ -815,7 +816,7 @@ if (cur == 0x7f) { - status=fputs("\\x7f", output); + STOPIF_CODE_EPIPE( fputs("\\x7f", output), NULL); continue; } @@ -823,22 +824,15 @@ * The things above 0x80 are needed. */ if (curmd5_ctx); - memcpy(encoder->output_md5, md5, sizeof(*encoder->output_md5)); + if (encoder->output_md5) + memcpy(encoder->output_md5, md5, sizeof(*encoder->output_md5)); DEBUGP("encode end gives MD5 of %s", cs__md52hex(md5)); STOPIF_CODE_ERR(retval != 0, ECHILD, @@ -1180,6 +1175,7 @@ * */ int hlp__encode_filter(svn_stream_t *s_stream, const char *command, int is_writer, + char *path, svn_stream_t **output, struct encoder_t **encoder_out, apr_pool_t *pool) @@ -1258,6 +1254,9 @@ /* \todo: possibly substitute some things in command, like filename or * similar. */ + if (path[0] == '.' && path[1] == PATH_SEPARATOR) + path+=2; + setenv(FSVS_EXP_CURR_ENTRY, path, 1); /* We could do a system in the parent process, but then we'd have to * juggle the filedescriptors around. Better to use a childprocess, where @@ -1532,8 +1531,8 @@ parent_with_arg=parent_with_arg->parent; } - /* If we got out of the loop, but there's no ->arg, we must be at the root - * (because ! ->parent is the other condition). + /* If we got out of the loop, but there's no ->arg, we must be at the + * root (because ! ->parent is the other condition). * The root is always the wc_path, so set it as default ... */ /** \todo We should set it beginning from a command line parameter, * if we have one. Preferably the nearest one ... */ @@ -1786,7 +1785,7 @@ if (!start) start=time(NULL); /* We delay with 25ms accuracy. */ - while (time(NULL) == start) + while (time(NULL) <= start) usleep(25000); } @@ -1839,3 +1838,26 @@ ex: return status; } + + +/** -. + * Caches the result, so that the configuration is only fetched a single time. + */ +int hlp__get_svn_config(apr_hash_t **config) +{ + int status; + svn_error_t *status_svn; + static apr_hash_t *cfg=NULL; + + + status=0; + /* We assume that a config hash as NULL will never be returned. + * (Else we'd try to fetch it more than once.) */ + if (!cfg) + STOPIF_SVNERR( svn_config_get_config, + (&cfg, opt__get_string(OPT__CONFIG_DIR), global_pool)); + + *config=cfg; +ex: + return status; +} diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/helper.h /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/helper.h --- fsvs-1.1.14/src/helper.h 2008-03-10 07:58:13.000000000 +0000 +++ fsvs-1.1.17/src/helper.h 2008-10-02 19:42:42.000000000 +0100 @@ -87,34 +87,40 @@ * (We have to read some data, but don't know how much we can send * further down the chain - so we have to buffer).*/ struct encoder_t { - /** Whether we're writing or reading. */ - int is_writer; /** Our datasource/sink. */ svn_stream_t *orig; + + /** Where to put the final md5. */ + md5_digest_t *output_md5; + + /** The un-encoded data digest (context). */ + apr_md5_ctx_t md5_ctx; + /** How many bytes are left to send in this buffer. */ + apr_size_t bytes_left; + /** PID of child, for \c waitpid(). */ pid_t child; + + /** Whether we're writing or reading. */ + int is_writer; /** STDIN filehandle for child. */ int pipe_in; /** STDOUT filehandle for child. 
*/ int pipe_out; /** Whether we can get more data. */ int eof; - /** The un-encoded data digest (context). */ - apr_md5_ctx_t md5_ctx; - /** How many bytes are left to send in this buffer. */ - apr_size_t bytes_left; /** Where unsent data starts. */ int data_pos; + /** The buffer. */ char buffer[ENCODE_BLOCKSIZE]; - /** Where to put the final md5. */ - md5_digest_t *output_md5; }; /** Encode \c svn_stream_t filter. */ int hlp__encode_filter(svn_stream_t *s_stream, const char *command, int is_writer, + char *path, svn_stream_t **output, struct encoder_t **encoder_out, apr_pool_t *pool); @@ -158,4 +164,8 @@ const char **unique_name, apr_pool_t *pool); + +/** Reads the subversion config file(s), found by \ref o_configdir. */ +int hlp__get_svn_config(apr_hash_t **config); + #endif diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/ignore.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/ignore.c --- fsvs-1.1.14/src/ignore.c 2008-04-02 06:25:13.000000000 +0100 +++ fsvs-1.1.17/src/ignore.c 2008-10-29 07:19:09.000000000 +0000 @@ -37,9 +37,9 @@ * \addtogroup cmds * \section ignore * - * \code - * fsvs ignore [prepend|append|at=n] pattern[s] + * \code * fsvs ignore dump|load + * fsvs ignore [prepend|append|at=n] pattern [pattern ...] * \endcode * * This command adds patterns to the end of the ignore list, @@ -95,8 +95,42 @@ * * \note Please take care that your wildcard patterns are not expanded * by the shell! + */ + + /** + * \addtogroup cmds + * \section rign + * + * \code + * fsvs rel-ignore [prepend|append|at=n] path-spec [path-spec ...] + * fsvs ri [prepend|append|at=n] path-spec [path-spec ...] + * \endcode * - **/ + * If you use more than a single working copy for the same data, it will be + * stored in different paths - and that makes absolute ignore patterns + * infeasible. But relative ignore patterns are anchored at the beginning + * of the WC root - which is a bit tiring if you're deep in your WC + * hierarchy and want to ignore some files. + * + * To make that easier you can use the \c rel-ignore (abbreviated as \c ri) + * command; this converts all given path-specifications (that may include + * wildcards as per the shell pattern specification above) to WC-relative + * values before storing them. + * + * Example for \c /etc as working copy root: + * \code + * fsvs rel-ignore '/etc/X11/xorg.conf.*' + * + * cd /etc/X11 + * fsvs rel-ignore 'xorg.conf.*' + * \endcode + * Both commands would store the pattern "./X11/xorg.conf.*". + * + * \note This works only for \ref ign_shell "shell patterns". + * + * For more details about ignoring files please see the \ref ignore command + * and \ref ignpat. + */ /** * \defgroup ignpat_dev Developers' reference @@ -229,6 +263,10 @@ * pattern ./sys will match \b only a file or directory named \c * sys. If you want to exclude a directories' files, but not the directory * itself, use something like ./dir/§* or ./dir/§** + * + * If you're deep within your working copy and you'd like to ignore some + * files with a WC-relative ignore pattern, you might like to use the \ref + * rign command. * * * \subsection ignpat_shell_abs Absolute shell patterns @@ -347,9 +385,9 @@ * * \section ign_mod Modifiers * - * All of these patterns can have one or more of these modifiers *before* - * them; not all combinations make sense. - * + * All of these patterns can have one or more of these modifiers \b before + * them, with (currently) optional \c "," as separators; not all + * combinations make sense. * * *
 * <tr><td>Modifier<td>Meaning
@@ -357,16 +395,60 @@
 * <tr><td>i<td>Ignore case for matching
 * <tr><td>t<td>A negative ignore pattern, ie. a take pattern.
+ * <tr><td>d<td>
+ * Match directories only. This is useful if you have a directory
+ * tree in which only certain files should be taken; see below.
+ * <tr><td>m:specification<td>
+ * Mode matching; this expects a specification of two octal values in
+ * the form m:and_value:compare_value, like
+ * m:04:00; the following examples give only the numbers. \n
+ * As an example: the file has mode \c 0750; a specification of
+ * <ul>
+ * <li>0700:0700 matches, and
+ * <li>0007:0007 doesn't match.</ul>
+ * \n
+ * A real-world example: 0007:0000 would match all entries that
+ * have \b no right bits set for \e "others", and could be used to
+ * exclude private files (like \c /etc/shadow). (Alternatively, the \e
+ * others-read bit could be used: 0004:0000. \n
+ * FSVS will give an error for invalid specifications, ie. ones that can
+ * never match; an example would be 0700:0007.
 * </table>
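To make the m:and_value:compare_value semantics concrete: the test implemented later in ign__is_ignore() is a bitwise AND followed by an equality check. A stand-alone sketch (the function name is invented):
\code
#include <sys/stat.h>

/* An entry matches "m:<and>:<cmp>" when masking its st_mode with
 * and_value yields exactly cmp_value. */
static int mode_spec_matches(mode_t st_mode, int and_value, int cmp_value)
{
	return (st_mode & and_value) == cmp_value;
}

/* For mode 0750:  mode_spec_matches(0750, 0700, 0700) == 1
 *                 mode_spec_matches(0750, 0007, 0007) == 0  */
\endcode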
    * + * For patterns with the \c m (mode match) and \c d (dironly) modifiers the + * filename pattern gets optional; so you don't have to give an all-match + * wildcard pattern (./§**) for these cases. + * * \code * t./proc/stat * ./proc/ * \endcode - * Such - * declaration would store \e only \c /proc/stat , and nothing else of \c - * /proc . + * Such declaration would store \e only \c /proc/stat , and nothing else + * of \c /proc . * + * \code + * t,d,./var/vmail/§** + * t./var/vmail/§**§/.*.sieve + * ./var/vmail/§** + * \endcode + * This would take all \c ".*.sieve" files (or directories) below + * \c /var/vmail, in all depths, and all directories there; but no other + * files. + * + * If your files are at a certain depth, and you don't want all other + * directories taken, too, you can specify that exactly: + * \code + * td./var/vmail/§*§ + * td./var/vmail/§*§/§* + * t./var/vmail/§*§/§*§/.*.sieve + * ./var/vmail/§** + * \endcode + * + * \code + * m:04:0 + * t,./etc/ + * ./§** + * \endcode + * This would take all files from \c /etc, but ignoring the files that are + * not world-readable (\c other-read bit cleared). * */ @@ -696,9 +778,11 @@ int ign___init_pattern_into(char *pattern, char *end, struct ignore_t *ignore) { int status, stop; + int and_value, cmp_value; char *cp; + status=0; cp=pattern+strlen(pattern); if (!end || end>cp) end=cp; @@ -711,9 +795,9 @@ } /* This are the defaults: */ + memset(ignore, 0, sizeof(*ignore)); ignore->pattern = pattern; ignore->is_ignore=1; - ignore->is_icase=0; stop=0; while (!stop) { @@ -722,28 +806,69 @@ case 't': ignore->is_ignore=0; break; + case 'd': + ignore->dir_only=1; + break; case 'i': ignore->is_icase=1; break; + case ',': + /* Separator, currently just ignored. */ + break; + case 'm': + STOPIF_CODE_ERR( ignore->mode_match_and, EINVAL, + "!Pattern \"%s\" has two or more mode specifications.", + ignore->pattern); + + STOPIF_CODE_ERR( sscanf(pattern+1, ":%o:%o%n", + &and_value, &cmp_value, &stop) != 2, EINVAL, + "!Ignore pattern \"%s\" has a bad mode specification;\n" + "the expected syntax is \"m::\".", + ignore->pattern); + + STOPIF_CODE_ERR( and_value>07777 || cmp_value>0777 || + (cmp_value & ~and_value), EINVAL, + "Mode matching specification in \"%s\" has invalid numbers.", + ignore->pattern); + + ignore->mode_match_and=and_value; + ignore->mode_match_cmp=cmp_value; + pattern += stop; + stop=0; + break; default: stop=1; break; } + DEBUGP("now at %d == %p; end=%p", *pattern, pattern, end); if (!stop) { pattern++; - STOPIF_CODE_ERR( pattern>=end, EINVAL, - "pattern not \\0-terminated"); + STOPIF_CODE_ERR( pattern>end || (pattern == end && *end!=0), + EINVAL, "pattern not \\0-terminated"); } } - STOPIF_CODE_ERR(!pattern, EINVAL, "pattern ends prematurely"); - DEBUGP("pattern: %ccase, %s", + /* Don't know if it makes *really* sense to allow a dironly pattern + * without pattern - but there's no reason to deny it outright. */ + STOPIF_CODE_ERR(!(*pattern || ignore->mode_match_and || + ignore->dir_only), EINVAL, + "!Pattern \"%s\"ends prematurely", ignore->pattern); + + DEBUGP("pattern: %ccase, %s, %sdironly, mode&0%o==0%o", ignore->is_icase ? 'I' : ' ', - ignore->is_ignore ? "ignore" : "take"); + ignore->is_ignore ? "ignore" : "take", + ignore->dir_only ? "" : "not ", + ignore->mode_match_and, ignore->mode_match_cmp); - if (strncmp(dev_prefix, pattern, strlen(dev_prefix)) == 0) + if (!*pattern) + { + /* Degenerate case of shell pattern without pattern; allowed in certain + * cases. 
*/ + ignore->type=PT_SHELL; + } + else if (strncmp(dev_prefix, pattern, strlen(dev_prefix)) == 0) { ignore->type=PT_DEVICE; ignore->compare_string = pattern; @@ -988,7 +1113,9 @@ } -/* Searches this entry for a take/ignore +/** -. + * + * Searches this entry for a take/ignore pattern. * * If a parent directory has an ignore entry which might be valid * for this directory (like **§/§*~), it is mentioned in this @@ -999,6 +1126,8 @@ * we cannot easily optimize. * is_ignored is set to +1 if ignored, 0 if unknown, and -1 if * on a take-list (overriding later ignore list). + * + * \a sts must already have the correct estat::st.mode bits set. */ int ign__is_ignore(struct estat *sts, int *is_ignored) @@ -1018,7 +1147,7 @@ /* root directory won't be ignored */ if (!dir) goto ex; - if (ops___filetype(&(sts->st)) == FT_IGNORE) + if (sts->to_be_ignored) { *is_ignored=1; goto ex; @@ -1091,15 +1220,29 @@ if (ign->type == PT_SHELL || ign->type == PT_PCRE || ign->type == PT_SHELL_ABS) { - DEBUGP("matching %s against %s", - cp, ign->pattern); - status=pcre_exec(ign->compiled, ign->extra, - cp, len, - 0, 0, - NULL, 0); - STOPIF_CODE_ERR( status && status != PCRE_ERROR_NOMATCH, - status, "cannot match pattern %s on data %s", - ign->pattern, cp); + DEBUGP("matching %s(0%o) against \"%s\" " + "(dir_only=%d; and=0%o, cmp=0%o)", + cp, sts->st.mode, ign->pattern, ign->dir_only, + ign->mode_match_and, ign->mode_match_cmp); + if (ign->dir_only && !S_ISDIR(sts->st.mode)) + { + status=PCRE_ERROR_NOMATCH; + } + else if (ign->mode_match_and && + ((sts->st.mode & ign->mode_match_and) != ign->mode_match_cmp)) + { + status=PCRE_ERROR_NOMATCH; + } + else if (ign->compiled) + { + status=pcre_exec(ign->compiled, ign->extra, + cp, len, + 0, 0, + NULL, 0); + STOPIF_CODE_ERR( status && status != PCRE_ERROR_NOMATCH, + status, "cannot match pattern %s on data %s", + ign->pattern, cp); + } } else if (ign->type == PT_DEVICE) { @@ -1318,7 +1461,45 @@ } -/* This is called to append new ignore patterns. +/** Parses the optional position specification. + * */ +int ign___parse_position(char *arg, int *position, int *advance) +{ + int status; + int i; + + status=0; + *advance=0; + + /* Normal pattern inclusion. May have a position specification here. */ + *position=PATTERN_POSITION_END; + if (strcmp(arg, "prepend") == 0) + { + *advance=1; + *position=PATTERN_POSITION_START; + } + else if (sscanf(arg, "at=%d", &i) == 1) + { + *advance=1; + STOPIF_CODE_ERR(i > used_ignore_entries, EINVAL, + "The position %d where the pattern " + "should be inserted is invalid.\n", i); + *position=i; + } + else if (strcmp(arg, "append") == 0) + { + /* Default */ + *advance=1; + } + +ex: + return status; +} + + + +/** -. + * This is called to append new ignore patterns. **/ int ign__work(struct estat *root UNUSED, int argc, char *argv[]) { @@ -1339,6 +1520,8 @@ /* Goto correct base. */ status=waa__find_common_base(0, NULL, NULL); if (status == ENOENT) + STOPIF(EINVAL, "!No working copy base was found."); + STOPIF(status, NULL); DEBUGP("first argument is %s", argv[0]); @@ -1383,31 +1566,9 @@ } else { - /* Normal pattern inclusion. May have a position specification here. 
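The new ign___parse_position() centralizes the prepend/append/at=N handling for both the ignore and rel-ignore commands; a minimal, hypothetical call site (mirroring ign__work() below, and assuming the usual STOPIF scaffolding):
\code
/* Consume an optional position argument before the patterns:
 *   "prepend"      -> PATTERN_POSITION_START, one argument eaten
 *   "append"       -> PATTERN_POSITION_END,   one argument eaten
 *   "at=3"         -> 3,                      one argument eaten
 *   anything else  -> PATTERN_POSITION_END,   nothing eaten        */
int position, eaten;

STOPIF( ign___parse_position(argv[0], &position, &eaten), NULL);
argv += eaten;
argc -= eaten;
\endcode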
*/ - position=PATTERN_POSITION_END; - if (strcmp(argv[0], "prepend") == 0) - { - argv++; - argc--; - position=PATTERN_POSITION_START; - } - else if (sscanf(argv[0], "at=%d", &i) == 1) - { - argv++; - argc--; - STOPIF_CODE_ERR(i > used_ignore_entries, EINVAL, - "The position %d where the pattern " - "should be inserted is invalid.\n", i); - position=i; - } - else if (strcmp(argv[0], "append") == 0) - { - /* Default */ - argv++; - argc--; - } - - + STOPIF( ign___parse_position(argv[0], &position, &i), NULL); + argv+=i; + argc-=i; STOPIF( ign__new_pattern(argc, argv, NULL, 1, position), NULL); } } /* not "fsvs load" */ @@ -1419,6 +1580,39 @@ } +/** -. + * Relativizes the given paths, and stores them. + **/ +int ign__rign(struct estat *root UNUSED, int argc, char *argv[]) +{ + int status; + int i, position; + char **normalized; + + status=0; + if (argc==0) ac__Usage_this(); + + /* Position given? */ + STOPIF( ign___parse_position(argv[0], &position, &i), NULL); + argv+=i; + argc-=i; + + /* Goto correct base. */ + status=waa__find_common_base2(argc, argv, &normalized, 1); + if (status == ENOENT) + STOPIF(EINVAL, "!No working copy base was found."); + STOPIF(status, NULL); + + /* Load, insert, save. */ + STOPIF( ign__load_list(NULL), NULL); + STOPIF( ign__new_pattern(argc, normalized, NULL, 1, position), NULL); + STOPIF( ign__save_ignorelist(NULL), NULL); + +ex: + return status; +} + + #if 0 inline int ign___do_parent_list(struct ignore_t ***target, int next_index, struct ignore_t **source, diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/ignore.h /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/ignore.h --- fsvs-1.1.14/src/ignore.h 2008-02-19 05:57:01.000000000 +0000 +++ fsvs-1.1.17/src/ignore.h 2008-07-17 05:42:38.000000000 +0100 @@ -26,6 +26,8 @@ /** Ignore command main function. */ work_t ign__work; +/** Rel-ignore command main function. */ +work_t ign__rign; /** Adds a list of new ignore patterns to the internal list. */ int ign__new_pattern(unsigned count, diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/info.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/info.c --- fsvs-1.1.14/src/info.c 2008-03-25 06:20:11.000000000 +0000 +++ fsvs-1.1.17/src/info.c 2008-09-15 16:18:46.000000000 +0100 @@ -73,8 +73,9 @@ sts->was_output=0; sts->flags |= RF_PRINT; + /* TODO: entry_type is already overwritten by ops__stat_to_action() */ STOPIF( st__status(sts), NULL); - STOPIF( st__print_entry_info(sts, 1), NULL); + STOPIF( st__print_entry_info(sts), NULL); ex: return status; diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/interface.h /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/interface.h --- fsvs-1.1.14/src/interface.h 2008-02-19 05:57:01.000000000 +0000 +++ fsvs-1.1.17/src/interface.h 2008-10-25 12:11:00.000000000 +0100 @@ -20,27 +20,6 @@ * environment variables and similar. */ /** @{ */ -/** \anchor envs - * \name Environment variables - * @{ */ -/** The environment variable to override the spool - * directory (default /var/spool/fsvs). */ -#define WAA__PATH_ENV "FSVS_WAA" - -/** The environment variable to override the configuration directory - * (default /etc/fsvs). */ -#define CONF__PATH_ENV "FSVS_CONF" - -/** The environment variable to set warning defaults. - * Contains a list of specifications just like used after \c -W, separated - * by one of \c ",;\\r\\n\\t". - * Example: - * \code - * export FSVS_WARNINGS=chmod-eperm=ignore - * \endcode - * */ -#define WARNINGS_ENV "FSVS_WARNINGS" - /** If this variable has a numeric value other than 0, the debuglevel is * set even before commandline parsing. 
*/ #define FSVS_DEBUG_ENV "FSVS_DEBUGLEVEL" @@ -60,6 +39,14 @@ /** @} */ +/** The default WAA path. */ +#define DEFAULT_WAA_PATH "/var/spool/fsvs" +/** The default CONF path. */ +#define DEFAULT_CONF_PATH "/etc/fsvs" +/** The default config directory (for authentication data), + * relative to $FSVS_CONF. */ +#define DEFAULT_CONFIGDIR_SUB "/auth" + /** \name List of environment variables used for a chroot jail. * Note that these are not \c \#ifdef - marked, as we'd like to use * off-the-shelf binaries from newer distributions without modifications! @@ -75,6 +62,37 @@ /** @} */ +/** \defgroup exp_env Exported environment variables + * \ingroup interface + * Programs started by FSVS, like \ref o_diff or in the \ref + * FSVS_PROP_COMMIT_PIPE "fsvs:commit-pipe", get some environment variables + * set, to help them achieve their purpose. + * + * */ +/** @{ */ +/** The (relative) path of the current entry. */ +#define FSVS_EXP_CURR_ENTRY "FSVS_CURRENT_ENTRY" +/** The configuration directory for the current working copy. */ +#define FSVS_EXP_WC_CONF "FSVS_WC_CONF" +/** The current working copy root directory. */ +#define FSVS_EXP_WC_ROOT "FSVS_WC_ROOT" +/** The revision we're updating or reverting to. */ +#define FSVS_EXP_TARGET_REVISION "FSVS_TARGET_REVISION" +/** \addtogroup exp_env + * + * Apart from these \c $FSVS_CONF and \c $FSVS_WAA are always set. + * + * Others might be useful, but I'm waiting for a specific user telling her needs before implementing them. + * - Base URL, and/or URL for current entry \n + * For multi-URL only the topmost? Or all? + * - Other filenames for merge and diff? + * - \c BASE, \c HEAD and other revisions + * + * Do you need something? Just ask me. + * @} */ + + + /** \name Manber-parameters * * These should be written to a property for big files, diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/log.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/log.c --- fsvs-1.1.14/src/log.c 2008-03-11 07:13:48.000000000 +0000 +++ fsvs-1.1.17/src/log.c 2008-06-10 05:49:17.000000000 +0100 @@ -23,14 +23,13 @@ * \e path, or, if none, the highest priority URL. * * The optional \e rev1 and \e rev2 can be used to restrict the - * revisions that are shown; if no values are given, the logs are - * given starting from HEAD downwards. + * revisions that are shown; if no values are given, the logs are given + * starting from \c HEAD downwards, and then a limit on the number of + * revisions is applied (but see the \ref o_logmax "limit" option). * * If you use the \ref glob_opt_verb "-v" -option, you get the files * changed in each revision printed, too. * - * Currently at most 100 log messages are shown. - * * There is an option controlling the output format; see \ref o_logoutput. * * TODOs: @@ -38,7 +37,6 @@ * - Show revision for \b all URLs associated with a working copy? * In which order? * - A URL-parameter, to specify the log URL. (Name) - * - Limit number of revisions shown? * */ @@ -131,19 +129,18 @@ STOPIF( log___divider(output, ANSI__GREEN), NULL); /* Taken from a svn commit message. */ - STOPIF_CODE_ERR( -1 == fprintf(output, - "r%llu | %s | %s | %d line%s\n" - "%s", - (t_ull)revision, auth, dat, lines, - lines == 1 ? "" : "s", - (opt__get_int(OPT__LOG_OUTPUT) & LOG__OPT_COLOR) ? ANSI__NORMAL : ""), - errno, NULL); + STOPIF_CODE_EPIPE( fprintf(output, + "r%llu | %s | %s | %d line%s\n" + "%s", + (t_ull)revision, auth, dat, lines, + lines == 1 ? "" : "s", + (opt__get_int(OPT__LOG_OUTPUT) & LOG__OPT_COLOR) ? 
ANSI__NORMAL : ""), + NULL); /* Print optionally the filenames */ if (changed_paths) { - STOPIF_CODE_ERR( -1 == fputs("Changed paths:\n", output), - errno, NULL); + STOPIF_CODE_EPIPE( fputs("Changed paths:\n", output), NULL); hi=apr_hash_first(pool, changed_paths); while (hi) { @@ -151,13 +148,12 @@ STOPIF( hlp__utf82local( name, &local_name, namelen), NULL); - STOPIF_CODE_ERR( -1 == fprintf(output, " %s\n", local_name), - errno, NULL); + STOPIF_CODE_EPIPE( fprintf(output, " %s\n", local_name), NULL); hi = apr_hash_next(hi); } } - STOPIF_CODE_ERR( -1 == fputs("\n", output), errno, NULL); + STOPIF_CODE_EPIPE( fputs("\n", output), NULL); /* Convert the message in parts; * - so that not too big buffers are processed at once, and @@ -201,7 +197,7 @@ DEBUGP("log output: %d bytes", cur); STOPIF( hlp__utf82local(message, &mess, cur), NULL); - STOPIF_CODE_ERR( fputs(mess, output) == EOF, errno, NULL); + STOPIF_CODE_EPIPE( fputs(mess, output), NULL); message+=cur; len-=cur; @@ -211,7 +207,7 @@ sol= ccp!=NULL; } - STOPIF_CODE_ERR( putc('\n', output) == EOF, EPIPE, NULL); + STOPIF_CODE_EPIPE( putc('\n', output), NULL); ex: RETURN_SVNERR(status); @@ -232,7 +228,6 @@ int limit; char **normalized; FILE *output=stdout; - svn_revnum_t head; status_svn=NULL; @@ -252,7 +247,7 @@ if (argc) { STOPIF( ops__traverse(root, normalized[0], 0, 0, &sts), - "This entry is unknown."); + "!The entry \"%s\" cannot be found.", normalized[0]); if (!sts->url) { STOPIF_CODE_ERR(urllist_count>1, EINVAL, @@ -270,7 +265,7 @@ } DEBUGP("doing URL %s", current_url->url); - STOPIF( url__open_session(&session), NULL); + STOPIF( url__open_session(NULL), NULL); if (argc) @@ -289,36 +284,37 @@ hlp__rev_to_string(opt_target_revision2)); - limit=100; + /* To take the difference (for -rX:Y) we need to know HEAD. */ + STOPIF( url__canonical_rev(current_url, &opt_target_revision), NULL); + STOPIF( url__canonical_rev(current_url, &opt_target_revision2), NULL); + switch (opt_target_revisions_given) { case 0: opt_target_revision=SVN_INVALID_REVNUM; opt_target_revision2=1; + + STOPIF( url__canonical_rev(current_url, &opt_target_revision), NULL); + opt__set_int(OPT__LOG_MAXREV, PRIO_DEFAULT, 100); break; case 1: opt_target_revision2 = 1; - limit=1; + opt__set_int(OPT__LOG_MAXREV, PRIO_DEFAULT, 1); break; case 2: + opt__set_int(OPT__LOG_MAXREV, + PRIO_DEFAULT, + abs(opt_target_revision-opt_target_revision2)+1); break; default: BUG("how many"); } + limit=opt__get_int(OPT__LOG_MAXREV); - /* DAV (http:// and https://) don't like getting SVN_INVALID_REVNUM - - * they throw an 175007 "HTTP Path Not Found", and "REPORT request - * failed on '...'". Get the real number. */ - STOPIF_SVNERR( svn_ra_get_latest_revnum, - (session, &head, global_pool)); - DEBUGP("HEAD is at %ld", head); - if (opt_target_revision == SVN_INVALID_REVNUM) - opt_target_revision=head; - if (opt_target_revision2 == SVN_INVALID_REVNUM) - opt_target_revision2=head; - + DEBUGP("log limit at %d", limit); - status_svn=svn_ra_get_log(session, + STOPIF_SVNERR( svn_ra_get_log, + (current_url->session, paths, opt_target_revision, opt_target_revision2, @@ -327,20 +323,7 @@ 0, // TODO: stop-on-copy, log__receiver, output, - global_pool); - - /* Quit silently on EPIPE. */ - if (status_svn && - status_svn->apr_err == EPIPE) - { - status=0; - status_svn=NULL; - goto ex; - } - - /* A bit of a hack. 
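The FSVS_EXP_* names defined in the interface.h hunk above are ordinary environment variables from the point of view of a commit- or update-pipe program (the helper.c hunk sets FSVS_CURRENT_ENTRY before starting the child). A minimal, purely illustrative filter could look like this; everything except the variable name is invented:
\code
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-alone program used as fsvs:commit-pipe. */
int main(void)
{
	const char *entry = getenv("FSVS_CURRENT_ENTRY");
	int c;

	fprintf(stderr, "filtering %s\n", entry ? entry : "(unknown entry)");
	/* Pass the data through unchanged; a real filter would transform it. */
	while ((c = getchar()) != EOF)
		putchar(c);
	return 0;
}
\endcode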
*/ - STOPIF_SVNERR(status_svn, +0); - + global_pool) ); STOPIF( log___divider(output, ANSI__NORMAL), NULL); diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/Makefile.in /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/Makefile.in --- fsvs-1.1.14/src/Makefile.in 2008-02-22 19:17:52.000000000 +0000 +++ fsvs-1.1.17/src/Makefile.in 2008-10-25 12:11:53.000000000 +0100 @@ -11,14 +11,14 @@ ################################ Definitions ################################ DIR := /usr/share/doc -HEADURL := "$URL: http://fsvs.tigris.org/svn/fsvs/tags/fsvs-1.1.14/fsvs/src/Makefile.in $" -HEADREV := "$Revision: 1496 $" +HEADURL := "$URL: http://fsvs.tigris.org/svn/fsvs/tags/fsvs-1.1.17/fsvs/src/Makefile.in $" +HEADREV := "$Revision: 1951 $" VERSION = $(shell perl -e '($$r) = (q( $(HEADREV) ) =~ m:(\d+):); $$t= q( $(HEADURL) ) =~ m:/tags/([^/]+): ? $$1 : "trunk"; print "$$t:$$r\n";' ) CFLAGS := @CFLAGS@ CFLAGS += -Wall -funsigned-char -Os -DFSVS_VERSION='"$(VERSION)"' LDFLAGS := @LDFLAGS@ -FSVS_LDFLAGS = $(LDFLAGS) -lsvn_subr-1 -lsvn_ra-1 -lpcre -lgdbm +FSVS_LDFLAGS = $(LDFLAGS) -lsvn_subr-1 -lsvn_delta-1 -lsvn_ra-1 -lpcre -lgdbm ifdef RPATH LDFLAGS += -Wl,-rpath,$(RPATH) @@ -33,6 +33,10 @@ endif endif +# CFLAGS += -m64 -Wpadded +# LDFLAGS += -m64 + + C_FILES := $(wildcard *.c) H_FILES := $(wildcard *.h) @@ -51,6 +55,7 @@ tags: $(C_FILES) $(wildcard *.h) @echo " $@" @-ctags $^ + @echo ":au BufNewFile,BufRead *.c syntax keyword Constant" $(shell grep -v "^!" < $@ | cut -f1 | grep _) > .vimrc .IGNORE: tags clean: rm -f *.o *.s $(D_FILES) $(DEST) 2> /dev/null || true @@ -70,18 +75,29 @@ ################################ Distribution ############################### +bindir = @bindir@ +exec_prefix= @exec_prefix@ +prefix = @prefix@ +mandir = @mandir@ +install: + for d in /etc/fsvs /var/spool/fsvs $(bindir) ; do test -d $$d || mkdir -p $$d; done + cp -a $(DEST) $(bindir) +# cp -a ../doc/fsvs.1 $(mandir) + # No automatic rebuild (?) #../doc/USAGE: $(C_FILES) $(H_FILES) #.PHONY: ../doc/USAGE -DOXDIR=../../www/doxygen/html/ -MANDIR=../../www/doxygen/man/man1/ +DOXDIR=../../doxygen/html/ +MANDIR=../../doxygen/man/man1/ MANDEST=../doc/ -DOXFLAG=../../www/doxygen/html/index.html +DOXFLAG=../../doxygen/html/index.html $(DOXFLAG): ( cat doxygen-data/Doxyfile ; echo PROJECT_NUMBER=$(VERSION)) | doxygen - ( cat doxygen-data/Doxyfile-man ; echo PROJECT_NUMBER=$(VERSION)) | doxygen - # Change the /§* to the correct /* cd $(DOXDIR) && perl -i.bak -pe '1 while s#([/*])\xc2?\xa7([/*])#\1\2#;' *.html + cd $(MANDIR) && perl -i.bak -pe '1 while s#([/*])\xc2?\xa7([/*])#\1\2#;' *.? + rm $(DOXDIR)/*.bak $(DOXDIR)/html-doc.zip || true cd $(DOXDIR)/.. && zip -rq9 html-doc.zip html -x 'html/.svn/*' && tar -cf html-doc.tar --exclude .svn html && bzip2 -vkf9 html-doc.tar && gzip -vf9 html-doc.tar $(DOXDIR)/group__cmds.html: $(DOXFLAG) touch $@ @@ -89,16 +105,17 @@ touch $@ # Fix for badly generated man page (Doxygen) -# Some other idea? Is there some other workarount? +# Some other idea? Is there some other workaround? 
$(MANDEST)/fsvs.1: $(MANDIR)/cmds.1 - perl -pe 's#^Commands and .*\\- (.SH "SYNOPSIS")#fsvs - fast versioning tool\n\1#' $< > $@ -# cp -a $< $@ -$(MANDEST)/fsvs-options.5: $(MANDIR)/options.1 - cp -a $< $@ + tools/man-repair.pl $@ "FSVS - fast versioning tool" < $< $(MANDEST)/fsvs-howto-backup.5: $(MANDIR)/howto_backup.1 - cp -a $< $@ + tools/man-repair.pl $@ "FSVS - Backup HOWTO" < $< $(MANDEST)/fsvs-howto-master_local.5: $(MANDIR)/howto_master_local.1 - cp -a $< $@ + tools/man-repair.pl $@ "FSVS - Master/Local HOWTO" < $< +$(MANDEST)/fsvs-options.5: $(MANDIR)/options.1 + tools/man-repair.pl $@ "FSVS - Options and configfile" < $< +$(MANDEST)/fsvs-url-format.5: $(MANDIR)/url_format.1 + tools/man-repair.pl $@ "FSVS - URL format" < $< ../doc/USAGE: $(DOXDIR)/group__cmds.html dev/dox2txt.pl $< > $@ @@ -107,8 +124,10 @@ doc.g-c: ../doc/USAGE # Generate static text strings ( cat $< ; echo "end" ) | dev/make_doc.pl > $@ -docs: $(DOXFLAG) ../doc/USAGE ../doc/IGNORING doc.g-c $(MANDEST)/fsvs.1 $(MANDEST)/fsvs-options.5 -# $(MANDEST)/fsvs-howto-backup.5 $(MANDEST)/fsvs-howto-master_local.5 +docs: $(DOXFLAG) ../doc/USAGE ../doc/IGNORING doc.g-c +docs: $(MANDEST)/fsvs.1 $(MANDEST)/fsvs-options.5 +docs: $(MANDEST)/fsvs-url-format.5 +docs: $(MANDEST)/fsvs-howto-backup.5 $(MANDEST)/fsvs-howto-master_local.5 .PHONY: docs $(DOXFLAG) @@ -129,7 +148,7 @@ # For debugging: generate preprocessed, generate assembler %.s: %.c - $(CC) $(CFLAGS) -S -o $@ $< || true + $(CC) $(CFLAGS) -S -fverbose-asm -o $@ $< || true %.P : %.c $(CC) $(CFLAGS) -E -o $@ $< @@ -179,6 +198,9 @@ revcount: count @last_rev=$(shell svk info | grep Revision | cut -d" " -f2) ; echo "number of edits up to revision $$last_rev:" ; for r in `seq 2 $$last_rev` ; do svk diff -r`expr $$r - 1`:$$r /svn2/trunk ; done | perl -pe 's#\ssrc/# #g;' | diffstat +structs: $(DEST) + @for a in `perl -ne 'print $$1,"\n" if m#^\s*struct\s+(\w+)\s+{\s*$$#' $(C_FILES) $(H_FILES)` ; do printf "%-30s " "struct $$a" ; gdb --batch -ex "printf \"\t%6d\", sizeof(struct $$a)" $(DEST) | cut -f2 -d= ; done 2>&1 | sort -k3 -n + .PHONY: revcount count diffstat diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/options.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/options.c --- fsvs-1.1.14/src/options.c 2008-03-27 13:17:58.000000000 +0000 +++ fsvs-1.1.17/src/options.c 2008-10-10 15:57:52.000000000 +0100 @@ -10,6 +10,7 @@ #include "global.h" #include "log.h" +#include "interface.h" #include "options.h" #include "helper.h" #include "warnings.h" @@ -18,12 +19,13 @@ /** \file * Functions dealing with user settings. */ +#define ENV_PREFIX "FSVS_" /** A structure to associate a string with an integer. */ struct opt___val_str_t { - int val; const char *string; + int val; }; @@ -46,9 +48,10 @@ { .string=NULL, } }; -/** Strings for auto/yes/no settings. */ -const struct opt___val_str_t opt___yes_no_auto[]= { - { .val=OPT__AUTO, .string="auto" }, +/** Strings for auto/yes/no settings. + * + * Don't change the order without changing all users! */ +const struct opt___val_str_t opt___yes_no[]= { { .val=OPT__YES, .string="yes" }, { .val=OPT__YES, .string="true" }, { .val=OPT__YES, .string="on" }, @@ -59,7 +62,7 @@ }; /* Why doesn't this work?? */ -// const struct opt___val_str_t opt___yes_no[] = &opt___yes_no_auto[1]; +const struct opt___val_str_t *opt___no_words = opt___yes_no+3; /** Filter strings and bits. 
@@ -70,14 +73,28 @@ { .val=FS_META_CHANGED, .string="meta" }, { .val=FS_META_MTIME, .string="mtime" }, { .val=FS_META_OWNER, .string="owner" }, + { .val=FS_META_UMODE, .string="mode" }, { .val=FS_META_GROUP, .string="group" }, { .val=FS_NEW, .string="new" }, + { .val=FS_CHANGED, .string="changed" }, { .val=FS_REMOVED, .string="deleted" }, + { .val=FS_REMOVED, .string="removed" }, { .val=0, .string="none" }, { .string=NULL, } }; +/** Change detection strings. + * \ref o_chcheck. */ +const struct opt___val_str_t opt___chcheck_strings[]= { + { .val=0, .string="none" }, + { .val=CHCHECK_FILE, .string="file_mtime" }, + { .val=CHCHECK_DIRS, .string="dir" }, + { .val=CHCHECK_ALLFILES, .string="allfiles" }, + { .val=-1, .string="full" }, +}; + + /** Delay action names. * See \ref o_delay. */ const struct opt___val_str_t opt___delay_strings[]= { @@ -110,6 +127,7 @@ opt___parse_t opt___strings2bitmap; opt___parse_t opt___strings2empty_bm; opt___parse_t opt___store_string; +opt___parse_t opt___store_env_noempty; opt___parse_t opt___normalized_path; opt___parse_t opt___parse_warnings; opt___parse_t opt___atoi; @@ -127,26 +145,40 @@ .name="path", .i_val=PATH_PARMRELATIVE, .parse=opt___string2val, .parm=opt___path_strings, }, + [OPT__LOG_MAXREV] = { + .name="limit", .i_val=0, .parse=opt___atoi, + }, [OPT__LOG_OUTPUT] = { .name="log_output", .i_val=LOG__OPT_DEFAULT, .parse=opt___strings2empty_bm, .parm=opt___log_output_strings, }, [OPT__COLORDIFF] = { - .name="colordiff", .i_val=OPT__AUTO, - .parse=opt___string2val, .parm=opt___yes_no_auto, + .name="colordiff", .cp_val=NULL, .parse=opt___store_string, }, [OPT__DIR_SORT] = { .name="dir_sort", .i_val=OPT__NO, - .parse=opt___string2val, .parm=opt___yes_no_auto+1, + .parse=opt___string2val, .parm=opt___yes_no, }, [OPT__STATUS_COLOR] = { .name="stat_color", .i_val=OPT__NO, - .parse=opt___string2val, .parm=opt___yes_no_auto+1, + .parse=opt___string2val, .parm=opt___yes_no, + }, + [OPT__STOP_ON_CHANGE] = { + .name="stop_change", .i_val=OPT__NO, + .parse=opt___string2val, .parm=opt___yes_no, }, [OPT__FILTER] = { .name="filter", .i_val=0, .parse=opt___strings2bitmap, .parm=opt___filter_strings, }, + [OPT__CHANGECHECK] = { + .name="change_check", .i_val=CHCHECK_FILE, + .parse=opt___strings2bitmap, .parm=opt___chcheck_strings, + }, + [OPT__ALL_REMOVED] = { + .name="all_removed", .i_val=OPT__YES, + .parse=opt___string2val, .parm=opt___yes_no, + }, [OPT__DEBUG_OUTPUT] = { .name="debug_output", .cp_val=NULL, .parse=opt___store_string, @@ -157,10 +189,10 @@ .parse=opt___string2val, .parm=opt___conflict_strings, }, [OPT__MERGE_PRG] = { - .name="merge_prg", .cp_val="merge", .parse=opt___store_string, + .name="merge_prg", .cp_val="diff3", .parse=opt___store_string, }, [OPT__MERGE_OPT] = { - .name="merge_opt", .cp_val="-A", .parse=opt___store_string, + .name="merge_opt", .cp_val="-m", .parse=opt___store_string, }, [OPT__DIFF_PRG] = { .name="diff_prg", .cp_val="diff", .parse=opt___store_string, @@ -182,10 +214,33 @@ [OPT__COMMIT_TO] = { .name="commit_to", .cp_val=NULL, .parse=opt___store_string, }, + [OPT__AUTHOR] = { + .name="author", .cp_val="", .parse=opt___store_env_noempty, + }, + + /* I thought about using opt___normalized_path() for these two; but that + * would be a change in behaviour. */ + [OPT__WAA_PATH] = { + .name="waa", .parse=opt___store_string, + /* Doing that here gives a warning "initializer not constant". 
+ .cp_val=DEFAULT_WAA_PATH, .i_val=strlen(DEFAULT_WAA_PATH), */ + .cp_val=NULL, .i_val=0, + }, + [OPT__CONF_PATH] = { + .name="conf", .parse=opt___store_string, + /* Doing that here gives a warning "initializer not constant". + .cp_val=DEFAULT_CONF_PATH, .i_val=strlen(DEFAULT_CONF_PATH), */ + .cp_val=NULL, .i_val=0, + }, + [OPT__CONFIG_DIR] = { + .name="config_dir", .parse=opt___store_string, + .cp_val=NULL, .i_val=0, + }, + [OPT__EMPTY_COMMIT] = { .name="empty_commit", .i_val=OPT__YES, - .parse=opt___string2val, .parm=opt___yes_no_auto+1, + .parse=opt___string2val, .parm=opt___yes_no, }, [OPT__DELAY] = { .name="delay", .i_val=OPT__NO, @@ -193,7 +248,7 @@ }, [OPT__COPYFROM_EXP] = { .name="copyfrom_exp", .i_val=OPT__YES, - .parse=opt___string2val, .parm=opt___yes_no_auto+1, + .parse=opt___string2val, .parm=opt___yes_no, }, }; @@ -212,7 +267,8 @@ /** Find an integer value by comparing with predefined strings. */ -inline int opt___find_string(const struct opt___val_str_t *list, char *string, +inline int opt___find_string(const struct opt___val_str_t *list, + const char *string, int *result) { for(; list->string; list++) @@ -256,6 +312,7 @@ char buffer[strlen(string)+1]; char *cp; + status=0; /* We make a local copy, so we can use strsep(). */ strcpy(buffer, string); string=buffer; @@ -307,6 +364,21 @@ } +/** Store a string, or expand a (non-empty) environment variable. */ +int opt___store_env_noempty(struct opt__list_t *ent, char *string, + enum opt__prio_e prio) +{ + /* Not ideal - makes a copy of an environment variable, that + * wouldn't be needed. */ + if (string[0] == '$') string=getenv(string+1); + + if (!string || !*string) + return 0; + + return opt___store_string(ent, string, prio); +} + + /** Parse warning settings. */ int opt___parse_warnings(struct opt__list_t *ent, char *string, enum opt__prio_e prio) @@ -414,6 +486,7 @@ char *sol, *eol; + status=0; strcpy(fn, path); if (name) { @@ -474,13 +547,12 @@ /** -. - * Looks for environment variables with the given \c prefix, and tries to + * Looks for environment variables with the given \c ENV_PREFIX, and tries to * parse them as options. * * Invalid names are ignored, invalid values not. */ int opt__load_env(char **env) { - static const char prefix[]="FSVS_"; int status; char *cur; char buffer[32]; @@ -490,10 +562,10 @@ while ( (cur=*(env++)) ) { - if (strncmp(cur, prefix, strlen(prefix)) == 0) + if (strncmp(cur, ENV_PREFIX, strlen(ENV_PREFIX)) == 0) { DEBUGP("found env %s", cur); - cur += strlen(prefix); + cur += strlen(ENV_PREFIX); for(i=0; cur[i] != '=' && i' shadow * \endcode - * You might want/need to set an update-pipe, too; see + * You might want/need to set an update-pipe, too; see \ref * FSVS_PROP_UPDATE_PIPE for details. * * The only thing left is to take the first backup: @@ -176,6 +178,12 @@ * \note Encrypted data cannot be deltified, so the few marked files will * take their full space in the repository. (Although \c gpg compresses the * files before encryption, so it won't be \b that bad.) + * + * You might be interested in \ref exp_env "exported + * environment variables", too. + * + * \note Another idea is to ignore files that are not readable by everyone; + * see \ref ign_mod "ignore pattern modifiers" for details. * */ #define FSVS_PROP_COMMIT_PIPE FSVS_PROP_PREFIX "commit-pipe" @@ -315,7 +323,17 @@ propval_orig_md5 []=FSVS_PROP_ORIG_MD5; /** @} */ -/* \todo check for existance of entries we'd like to store entries for */ + +/** -. 
+ * + * I thought about using "constant prefix.$random" => "$propertyname" for + * them - but it's more work than simply ignoring them before listing. + * + * And as they're not widely used, it's easier this way. */ +const char prp___to_be_removed_value[]= + "FSVS:INTERNAL-\nto-be-removed\n-" + "\x8f\xc1\xa6\xe5\x86\x0a\x01\x72\x54\x89\x25\x23\x03\xc3\xfa\x75"; + /** -. @@ -433,20 +451,36 @@ * repository; its \ref estat::remote_status is set. * */ int prp__set_from_aprhash(struct estat *sts, - apr_hash_t *props, + apr_hash_t *props, + enum prp__set_from_aprhash_e flags, apr_pool_t *pool) { int status; apr_hash_index_t *hi; char *prop_key; + apr_ssize_t prop_key_len; + svn_string_t *prop_val; hash_t db; int to_store, count; void *k, *v; + status=0; count=0; + + /* We always open the database file. If no user-specified properties are + * given, old properties are removed that way. + * (Needed because we'd only know in cb__record_changes() that properties + * get removed; in revert we only have the new list. + * TODO: Merge local and remote changes.) */ + /* We remember the filename, so that empty hashes get removed on close. + * */ db=NULL; + if (flags & STORE_IN_FS) + STOPIF( prp__open_byestat(sts, + GDBM_NEWDB | HASH_REMEMBER_FILENAME, &db), NULL); + for (hi = apr_hash_first(pool, props); hi; hi = apr_hash_next(hi)) { /* As the name/key is a (char*), we don't need its length. */ @@ -455,7 +489,7 @@ * whatever needed in subsequent calls - which isn't pretty, too. */ k=&prop_key; v=&prop_val; - apr_hash_this(hi, k, NULL, v); + apr_hash_this(hi, k, &prop_key_len, v); to_store=0; STOPIF( up__parse_prop(sts, prop_key, prop_val, @@ -463,23 +497,25 @@ if (to_store) { - if (!db) - STOPIF( prp__open_byestat(sts, GDBM_NEWDB, &db), NULL); - - /** \todo - store in utf-8? local encoding? - * What if it's binary??? Better do no translation, ie store as - * UTF-8. */ - STOPIF( prp__set_svnstr(db, prop_key, prop_val), NULL); + if (db) + { + /** \todo - store in utf-8? local encoding? + * What if it's binary??? Better do no translation, ie store as + * UTF-8. */ + STOPIF( prp__set_svnstr(db, prop_key, prop_val), NULL); + } count++; } + else + { + /* If already used it's no longer needed. */ + if (flags & ONLY_KEEP_USERDEF) + apr_hash_set(props, prop_key, prop_key_len, NULL); + } } - if (db || count) - { - DEBUGP("%d properties stored", count); - BUG_ON(! (db && count) ); - STOPIF( hsh__close(db, status), NULL); - } + DEBUGP("%d properties stored", count); + STOPIF( hsh__close(db, status), NULL); ex: return status; @@ -514,12 +550,20 @@ { STOPIF( prp__open_byname( *normalized, GDBM_WRCREAT, &db), NULL); - STOPIF( hsh__fetch(db, key, &value), NULL); - if (value.dptr) + status=prp__fetch(db, key, &value); + if (status == ENOENT) + { + DEBUGP("No such property"); + } + else if (status) { - status=fputs(value.dptr, output); - status|=fputc('\n', output); - if (status <0) break; + /* Any other error means trouble. 
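For reference, the extended prp__set_from_aprhash() signature (the flag enum is in the props.h hunk below) would be called roughly as follows; the flag combination shown here is only an assumption, not taken from the patch:
\code
/* Hypothetical fragment: store the received properties of sts into its
 * property file, keeping only user-defined ones in the hash afterwards. */
STOPIF( prp__set_from_aprhash(sts, props,
			STORE_IN_FS | ONLY_KEEP_USERDEF, pool), NULL);
\endcode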
*/ + STOPIF( status, NULL); + } + else if (value.dptr && !prp__prop_will_be_removed(value)) + { + STOPIF_CODE_EPIPE( fputs(value.dptr, output), NULL); + STOPIF_CODE_EPIPE( fputc('\n', output), NULL); } STOPIF( hsh__close(db, status), NULL); @@ -570,8 +614,9 @@ if (action->i_val == FS_REMOVED) { - value.dptr=NULL; - value.dsize=0; + value.dptr=(char*)prp___to_be_removed_value; + /* + \0 */ + value.dsize=strlen(prp___to_be_removed_value)+1; } else { @@ -660,7 +705,7 @@ { int status; hash_t props; - datum value; + datum value; props=NULL; @@ -687,7 +732,7 @@ * */ int prp__l_work(struct estat *root, int argc, char *argv[]) { - int status, i, count; + int status, count; int many_files; char indent[5]=" "; hash_t db; @@ -697,6 +742,7 @@ status=0; + db=NULL; if (!argc) ac__Usage_this(); @@ -722,32 +768,36 @@ while (status == 0) { DEBUGP("got key with len=%d: %.30s", key.dsize, key.dptr); - count++; - if (count==1 && many_files) - printf("Properties of %s:\n", *normalized); + STOPIF( prp__fetch(db, key, &data), NULL); + if (prp__prop_will_be_removed(data)) + { + /* This property will be removed on next commit. */ + } + else + { + count++; - i=fputs(indent, output); - /* The key and value are defined to have a \0 at the end. - * This should not be printed. */ - i|=hlp__safe_print(output, key.dptr, key.dsize-1); + if (count==1 && many_files) + STOPIF_CODE_EPIPE( printf("Properties of %s:\n", *normalized), NULL); - if (opt_verbose>0) - { - STOPIF( hsh__fetch(db, key, &data), NULL); + STOPIF_CODE_EPIPE( fputs(indent, output), NULL); + /* The key and value are defined to have a \0 at the end. + * This should not be printed. */ + STOPIF( hlp__safe_print(output, key.dptr, key.dsize-1), NULL); - fputc('=',output); - i|=hlp__safe_print(output, data.dptr, data.dsize-1); + if (opt_verbose>0) + { + STOPIF_CODE_EPIPE( fputc('=',output), NULL); + STOPIF( hlp__safe_print(output, data.dptr, data.dsize-1), NULL); - free(data.dptr); - } + free(data.dptr); + } - fputc('\n', output); + STOPIF_CODE_EPIPE( fputc('\n', output), NULL); + } status=prp__next(db, &key, &key); - - /* SIGPIPE or similar? */ - if (i<0) break; } if (count == 0) diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/props.h /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/props.h --- fsvs-1.1.14/src/props.h 2008-03-11 07:15:31.000000000 +0000 +++ fsvs-1.1.17/src/props.h 2008-06-05 07:30:17.000000000 +0100 @@ -6,8 +6,8 @@ * published by the Free Software Foundation. ************************************************************************/ -#ifndef __REMOTE_H__ -#define __REMOTE_H__ +#ifndef __PROPS_H__ +#define __PROPS_H__ /** \file * Property handling header file - \ref prop-set, \ref prop-get, \ref @@ -40,10 +40,19 @@ const char *name, const svn_string_t *utf8_value); + +/** Bitmasks for prp__set_from_aprhash() operation. */ +enum prp__set_from_aprhash_e { + DEFAULT=0, + STORE_IN_FS=1, + ONLY_KEEP_USERDEF=2, +}; + /** Writes the given set of properties of \a sts into its \ref prop file. * */ int prp__set_from_aprhash(struct estat *sts, apr_hash_t *props, + enum prp__set_from_aprhash_e flags, apr_pool_t *pool); /** Wrapper functions, if we need to have some compatibility layer. */ @@ -109,5 +118,15 @@ /** Prop-list worker function. */ work_t prp__l_work; + +/** Value string for to-be-removed properties. */ +extern const char prp___to_be_removed_value[]; +/** Test function for to-be-removed properties. 
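Properties queued for removal are now stored with the magic marker value above, so readers have to skip them; a fragment in the style of the prop-get/prop-list hunks (hypothetical, assumes an open hash and the STOPIF scaffolding):
\code
datum value;

STOPIF( prp__fetch(db, key, &value), NULL);
/* Entries carrying the to-be-removed marker are not printed. */
if (!prp__prop_will_be_removed(value))
	STOPIF_CODE_EPIPE( fputs(value.dptr, output), NULL);
\endcode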
*/ +static inline int prp__prop_will_be_removed(datum data) +{ + return strcmp(data.dptr, prp___to_be_removed_value) == 0; +} + + #endif diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/racallback.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/racallback.c --- fsvs-1.1.14/src/racallback.c 2008-03-25 06:20:11.000000000 +0000 +++ fsvs-1.1.17/src/racallback.c 2008-10-02 18:41:54.000000000 +0100 @@ -1,5 +1,5 @@ /************************************************************************ - * Copyright (C) 2005-2007-2008 Philipp Marek. + * Copyright (C) 2005-2008 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as @@ -12,12 +12,12 @@ * */ #include +#include #include #include #include #include -#include #include @@ -27,6 +27,7 @@ #include "update.h" #include "est_ops.h" #include "checksum.h" +#include "cache.h" #include "url.h" #include "racallback.h" @@ -39,21 +40,21 @@ svn_config_t *cfg; - status=0; - STOPIF_SVNERR(svn_config_get_config, - (&cfg_hash, NULL, pool) ); + STOPIF( hlp__get_svn_config(&cfg_hash), NULL); cfg = apr_hash_get(cfg_hash, SVN_CONFIG_CATEGORY_CONFIG, APR_HASH_KEY_STRING); + /* Set up Authentication stuff. */ STOPIF_SVNERR( svn_cmdline_setup_auth_baton, (&cb__cb_table.auth_baton, !(isatty(STDIN_FILENO) && isatty(STDOUT_FILENO)), - NULL, /* Username */ + opt__get_int(OPT__AUTHOR) ? + opt__get_string(OPT__AUTHOR) : NULL, NULL, /* Password */ - NULL, /* Config dir */ - 1, /* no_auth_cache */ + opt__get_string(OPT__CONFIG_DIR), + 0, /* no_auth_cache */ cfg, NULL, /* cancel function */ NULL, /* cancel baton */ @@ -78,29 +79,10 @@ apr_pool_t *pool) { int status; - static char *buffer=NULL, *fn_pos; - const char *tmp; - int len; - char filename[]="fsvs.XXXXXX"; - if (!buffer) - { - STOPIF( apr_temp_dir_get(&tmp, pool), - "Getting a temporary directory path"); - len=strlen(tmp); - /* Directory PATH_SEPARATOR Filename '\0' (+space) */ - buffer=malloc(len + 1 + strlen(filename) + 1 + 4); - STOPIF_ENOMEM(!buffer); - - strcpy(buffer, tmp); - fn_pos=buffer+len; - *(fn_pos++) = PATH_SEPARATOR; - } - - strcpy(fn_pos, filename); - STOPIF( apr_file_mktemp(fp, buffer, 0, pool), - "Cannot create a temporary file"); + STOPIF( waa__get_tmp_name( NULL, NULL, fp, pool), NULL); + ex: RETURN_SVNERR(status); } @@ -134,88 +116,143 @@ } -void cb___mark_parents(struct estat *sts) -{ - /* mark the entry as to-be-done. - * mark the parents too, so that we don't have to search - * in-depth. */ - while (sts->parent && !(sts->parent->remote_status & FS_CHILD_CHANGED)) - { - sts->parent->remote_status |= FS_CHILD_CHANGED; - sts=sts->parent; - } -} - - -/** This function adds a new entry below dir, setting it to - * \c FS_NEW or \c FS_REPLACED. */ -int cb___add_entry(struct estat *dir, - const char *utf8_path, +/** If \a may_create is \c 0, \c ENOENT may be returned (ie. was not + * found). + * + * If \a mode doesn't include some permission bits, like \c 0700 or \c + * 0600, a default value is chosen. + * + * If it didn't exist, or if this is a higher priority URL, the parents get + * FS_CHILD_CHANGED set. + * + * \a path gets set (if not \c NULL) to \a utf8_path in local encoding. 
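The comment above spells out the contract of the merged cb__add_entry(): open_file()/open_directory() call it with may_create==0 and accept ENOENT, add_file()/add_directory() pass may_create==1, and has_existed distinguishes a fresh node (0) from a known one (EEXIST). A condensed model of that lookup-or-create shape, with a toy table instead of the estat tree (purely illustrative):

#include <errno.h>
#include <string.h>

struct node { char name[64]; };

static struct node table[16];
static int used;

/* Return 0 and *out on success, ENOENT if missing and may_create==0.
 * *has_existed is set to EEXIST for known entries, 0 for fresh ones. */
static int add_entry(const char *name, int may_create,
		int *has_existed, struct node **out)
{
	int i;

	for (i=0; i<used; i++)
		if (strcmp(table[i].name, name) == 0)
		{
			if (has_existed) *has_existed=EEXIST;
			*out=&table[i];
			return 0;
		}

	if (!may_create) return ENOENT;		/* open_file()/open_directory() path */
	if (used >= (int)(sizeof(table)/sizeof(table[0]))) return ENOSPC;

	if (has_existed) *has_existed=0;
	strncpy(table[used].name, name, sizeof(table[used].name)-1);
	*out=&table[used++];
	return 0;				/* add_file()/add_directory() path */
}

int main(void)
{
	struct node *n;
	int existed;

	add_entry("dir", 1, &existed, &n);	/* add_directory(): creates    */
	return add_entry("file", 0, NULL, &n);	/* open_file(): ENOENT here    */
}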
*/ +int cb__add_entry(struct estat *dir, + const char *utf8_path, char **loc_path, const char *utf8_copy_path, svn_revnum_t copy_rev, int mode, int *has_existed, + int may_create, void **new) { int status; - struct estat *sts; + struct estat *sts, *copy; const char *filename; char* path; char* copy_path; + int overwrite; + copy=NULL; + overwrite=0; STOPIF( hlp__utf82local(utf8_path, &path, -1), NULL ); + if (loc_path) *loc_path=path; STOPIF( hlp__utf82local(utf8_copy_path, ©_path, -1), NULL ); STOPIF_CODE_ERR(copy_path, EINVAL, "don't know how to handle copy_path %s@%ld", copy_path, copy_rev); - DEBUGP("add entry %s, mode 0%03o", path, mode); - /* The path should be done by open_directory descending. + /* The path should be done by open_directory descending. * We need only the file name. */ filename = ops__get_filename(path); STOPIF( ops__find_entry_byname(dir, filename, &sts, 0), "cannot lookup entry %s", path); + DEBUGP("entry %s, mode 0%03o; %sfound, may %screate", path, mode, + sts ? "" : "not ", + may_create ? "" : "not "); + if (sts) { + if (has_existed) *has_existed=EEXIST; + + if (!url__current_has_precedence(sts->url)) + goto no_change; + /* This file already exists, or an update from another URL just * brought it in. * * The caller knows whether we should overwrite it silently. */ if (sts->remote_status & FS_REMOVED) + { sts->remote_status = FS_REPLACED; - if (has_existed) *has_existed=EEXIST; - if (!url__current_has_precedence(sts->url)) - goto no_change; + /* Then store the old values. */ + STOPIF( ops__allocate(1, ©, NULL), NULL); + /* The by_inode and by_name arrays of the parent might point to the old + * location; rather than searching and changing them, we simply copy + * the old data, and clean the references in sts. */ + memcpy(copy, sts, sizeof(*copy)); + overwrite=1; + copy->remote_status=FS_REMOVED; + } } else { + STOPIF_CODE_ERR(!may_create, ENOENT, NULL); + STOPIF( ops__allocate(1, &sts, NULL), NULL); + /* To avoid the memory allocator overhead we would have to do our own + * memory management here - eg. using dir->string. + * But that would have to be tuned for performance - we get here often. + * TODO. + * */ + memset(sts, 0, sizeof(*sts)); sts->name = strdup(filename); STOPIF_ENOMEM(!sts->name); - sts->parent = dir; sts->remote_status = FS_NEW; /* Put into tree */ STOPIF( ops__new_entries(dir, 1, &sts), NULL); if (has_existed) *has_existed=0; + + dir->remote_status |= FS_CHANGED; + overwrite=1; } - cb___mark_parents(sts); - sts->st.mode = mode | 0700; /* until we know better */ - sts->entry_type = ops___filetype( &(sts->st) ); - sts->url=current_url; - /* To avoid EPERM on chmod() etc. */ - sts->st.uid=getuid(); - sts->st.gid=getgid(); + if (overwrite) + { + sts->parent=dir; + + /* This memset above implicitly clears all other references to the copy + * data - entry_count, by_inode, by_name, strings. + * But we need the copy itself. */ + sts->entry_count=0; + sts->by_inode=NULL; + sts->by_name=NULL; + sts->strings=NULL; + + sts->decoder=NULL; + sts->has_orig_md5=0; + memset(& sts->md5, 0, sizeof(sts->md5)); + + memset(& sts->st, 0, sizeof(sts->st)); + + /* Some permission bits must be set; suid/sgid/sticky are not enough. + * Directories need an "x" bit, too. + * */ + if (!(mode & 0777)) + mode |= S_ISDIR(mode) ? 0700 : 0600; + /* Until we know better */ + sts->updated_mode = sts->st.mode = mode; + +/* Default is current time. */ + time( & sts->st.mtim.tv_sec ); + + /* To avoid EPERM on chmod() etc. 
*/ + sts->st.uid=getuid(); + sts->st.gid=getgid(); + + sts->old=copy; + if (copy) + copy->cache_index=0; + } sts->url=current_url; + ops__mark_parent_cc(sts, remote_status); - dir->remote_status |= FS_CHANGED; no_change: /* Even if this entry has lower priority, we have to have a baton for it. @@ -227,29 +264,62 @@ } -int cb___open_entry(const char *utf8_path, - struct estat *dir, - void **child_baton) +inline int cb___store_prop(struct estat *sts, + const char *utf8_name, const svn_string_t *value, + apr_pool_t *pool) { - struct estat *sts; int status; - char* path; - + int user_prop; + apr_pool_t *u_p_pool; + char *copy; + #ifdef DEBUG + static long u_p_count=0, + u_p_bytes=0; + #endif - STOPIF( hlp__utf82local(utf8_path, &path, -1), NULL ); status=0; - STOPIF( ops__find_entry_byname(dir, path, &sts, 0), - "cannot find entry %s", path); + if (!url__current_has_precedence(sts->url)) goto ex; - if (!sts) status=ENOENT; - else + + user_prop=0; + STOPIF( up__parse_prop(sts, utf8_name, value, &user_prop, pool), NULL); + ops__mark_parent_cc(sts, remote_status); + DEBUGP("have name=%s; user? %d", utf8_name, user_prop); + + + if (action->keep_user_prop && user_prop) { - if (!sts->url || url__current_has_precedence(sts->url)) - sts->url = current_url; + if (!sts->user_prop) + { + /* The root entry has no associated URL, so it has no pool. + * Normally there shouldn'd be any user-properties, though. */ + STOPIF( apr_pool_create(&u_p_pool, sts->url ? + sts->url->pool : global_pool), NULL); + + sts->user_prop=apr_hash_make(u_p_pool); + apr_hash_set(sts->user_prop, "", 0, u_p_pool); + } + else + u_p_pool=apr_hash_get(sts->user_prop, "", 0); + + + /* apr_hash_set() only stores an address; we have to take care to not + * loose the length and data, because the pool they're in might be + * invalid after closing this entry. */ + copy=apr_palloc(u_p_pool, strlen(utf8_name)+1); + strcpy(copy, utf8_name); + apr_hash_set(sts->user_prop, copy, APR_HASH_KEY_STRING, + svn_string_dup(value, u_p_pool) ); + +#ifdef ENABLE_DEBUG + u_p_count++; + u_p_bytes += value->len + sizeof(char*) + sizeof(*value); + DEBUGP("%lu user-props stored, with %lu bytes.", + u_p_count, u_p_bytes); +#endif } - *child_baton = sts; ex: return status; } @@ -299,8 +369,9 @@ if (sts) { DEBUGP("deleting entry %s", path); - cb___mark_parents(sts); - dir->remote_status |= FS_CHANGED; + + ops__mark_parent_cc(sts, remote_status); + sts->remote_status = FS_REMOVED; if (action->repos_feedback) @@ -334,8 +405,8 @@ int status; int has_existed; - STOPIF( cb___add_entry(dir, utf8_path, utf8_copy_path, - copy_rev, S_IFDIR, &has_existed, + STOPIF( cb__add_entry(dir, utf8_path, NULL, utf8_copy_path, + copy_rev, S_IFDIR, &has_existed, 1, child_baton), NULL ); sts=*child_baton; @@ -364,18 +435,9 @@ struct estat *dir=parent_baton; int status; - status=cb___open_entry( utf8_path, dir, child_baton); - if (status == ENOENT) - { - /** \todo conflict - removed locally? added */ - STOPIF( cb___add_entry(dir, utf8_path, NULL, 0, - S_IFDIR, NULL, child_baton), NULL ); - cb___mark_parents(dir); - } - else - { - STOPIF( status, NULL); - } + /** \todo conflict - removed locally? added */ + STOPIF( cb__add_entry(dir, utf8_path, NULL, NULL, 0, + S_IFDIR, NULL, 0, child_baton), NULL); ex: RETURN_SVNERR(status); @@ -387,17 +449,10 @@ const svn_string_t *value, apr_pool_t *pool) { - struct estat *sts=dir_baton; int status; - status=0; - -/** The root entry has per definition \b no URL. 
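cb___store_prop() above keeps user-defined properties per entry in an apr_hash_t with its own subpool; the empty key "" doubles as the slot where that pool pointer is remembered, and both key and value are copied into the pool because the editor's per-call pool is gone by the time the data is needed. A reduced sketch of this ownership pattern, using plain strings instead of svn_string_t (the helper name is hypothetical):

#include <apr_general.h>
#include <apr_pools.h>
#include <apr_hash.h>
#include <apr_strings.h>

/* Store name=value in *cache; create the hash plus its private pool on
 * first use and remember that pool under the key "". */
static void cache_user_prop(apr_hash_t **cache, apr_pool_t *parent,
		const char *name, const char *value)
{
	apr_pool_t *pool;

	if (!*cache)
	{
		apr_pool_create(&pool, parent);
		*cache = apr_hash_make(pool);
		apr_hash_set(*cache, "", 0, pool);
	}
	else
		pool = apr_hash_get(*cache, "", 0);

	/* apr_hash_set() stores only pointers, so both strings must be
	 * duplicated into a pool that outlives the caller's. */
	apr_hash_set(*cache, apr_pstrdup(pool, name), APR_HASH_KEY_STRING,
			apr_pstrdup(pool, value));
}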
*/ - if (!sts->url || url__current_has_precedence(sts->url)) - { - STOPIF( up__parse_prop(sts, utf8_name, value, NULL, pool), NULL); - cb___mark_parents(sts); - } + /* We do this additional call to get a meaningful backtrace. */ + STOPIF( cb___store_prop(dir_baton, utf8_name, value, pool), NULL); ex: RETURN_SVNERR(status); @@ -412,7 +467,7 @@ sts->repos_rev = cb___dest_rev; if (action->repos_feedback) - STOPIF( action->repos_feedback(sts), NULL); + STOPIF( action->repos_feedback(sts), NULL); ex: return status; @@ -426,6 +481,10 @@ struct estat *sts=dir_baton; int status; + /* Release some memory; that was likely needed by cb__add_entry(), but is no + * longer. */ + IF_FREE(sts->by_name); + STOPIF( cb___close(sts), NULL); ex: @@ -457,8 +516,8 @@ struct estat *sts; int status; - STOPIF( cb___add_entry(dir, utf8_path, utf8_copy_path, - copy_rev, S_IFREG, NULL, file_baton), + STOPIF( cb__add_entry(dir, utf8_path, NULL, utf8_copy_path, + copy_rev, S_IFREG, NULL, 1, file_baton), NULL); sts=*file_baton; @@ -484,26 +543,16 @@ struct estat *dir=parent_baton; struct estat *sts; int status; + int was_there; - status=cb___open_entry( utf8_path, dir, file_baton); - if (status == ENOENT) - { - /** \todo conflict - removed locally */ - STOPIF( cb___add_entry(dir, utf8_path, NULL, 0, - S_IFREG, NULL, file_baton), NULL); - sts=*file_baton; - if (url__current_has_precedence(sts->url)) - { - cb___mark_parents(dir); - } - } - else - { - STOPIF( status, NULL); - STOPIF( up__fetch_decoder(*(struct estat**)file_baton), NULL); - } + STOPIF( cb__add_entry(dir, utf8_path, NULL, NULL, 0, + S_IFREG, &was_there, 0, file_baton), NULL); + sts=(struct estat*)*file_baton; - ((struct estat*)*file_baton)->decoder_is_correct=1; + if (was_there) + STOPIF( up__fetch_decoder(sts), NULL); + + sts->decoder_is_correct=1; ex: RETURN_SVNERR(status); @@ -521,10 +570,7 @@ status=0; if (url__current_has_precedence(sts->url)) - { - sts->remote_status |= FS_CHANGED; - cb___mark_parents(sts); - } + ops__mark_changed_parentcc(sts, remote_status); *handler = cb__txdelta_discard; *handler_baton=sts; @@ -538,15 +584,10 @@ const svn_string_t *value, apr_pool_t *pool) { - struct estat *sts=file_baton; int status; - status=0; - if (url__current_has_precedence(sts->url)) - { - STOPIF( up__parse_prop(sts, utf8_name, value, NULL, pool), NULL); - cb___mark_parents(sts); - } + /* We do this additional call to get a meaningful backtrace. */ + STOPIF( cb___store_prop(file_baton, utf8_name, value, pool), NULL); ex: RETURN_SVNERR(status); @@ -563,16 +604,14 @@ STOPIF( cb___close(sts), NULL); - - BUG_ON(S_ISDIR(sts->st.mode)); - if (!S_ISDIR(sts->st.mode) && text_checksum) - STOPIF( cs__char2md5(text_checksum, sts->md5 ), NULL); - - if (sts->has_orig_md5 || sts->decoder) - DEBUGP("Has an original MD5, %s not used", text_checksum); - else - if (text_checksum) - STOPIF( cs__char2md5(text_checksum, sts->md5 ), NULL); + if (!S_ISDIR(sts->st.mode)) + { + if (sts->has_orig_md5 || sts->decoder) + DEBUGP("Has an original MD5, %s not used", text_checksum); + else + if (text_checksum) + STOPIF( cs__char2md5(text_checksum, sts->md5 ), NULL); + } ex: RETURN_SVNERR(status); @@ -641,6 +680,8 @@ .abort_edit = cb___abort_edit, }; +/** @} */ + int cb___report_path_rev(struct estat *dir, const svn_ra_reporter2_t *reporter, @@ -683,13 +724,30 @@ } +/** -. + * Just a proxy; calls cb__record_changes_mixed() with the \a root, \a target + * and \a pool, and default values for the rest. 
*/ +int cb__record_changes(struct estat *root, + svn_revnum_t target, + apr_pool_t *pool) +{ + int status; + + STOPIF( cb__record_changes_mixed(root, target, + NULL, 0, pool), NULL); +ex: + return status; +} + /** -. * Calls the svn libraries and records which entries would be changed - * on this update. - * \param session An opened session + * on this update on \c current_url. * \param root The root entry of this wc tree * \param target The target revision. \c SVN_INVALID_REVNUM is not valid. + * \param other_paths A \c NULL-terminated list of paths that are sent to + * the svn_ra_reporter2_t::set_path(). + * \param other_revs The revision to be sent for \a other_paths. * \param pool An APR-pool. * * When a non-directory entry gets replaced by a directory, its @@ -700,22 +758,38 @@ * If a non-directory gets replaced by a directory, \c entry_count and * \c by_inode are kept - we need them for up__rmdir() to remove * known child entries. + * + * Please note that it's not possible to run \e invisible entries (that are + * not seen because some higher priority URL overlays them) to run as \c + * baton==NULL (although that would save quite a bit of + * url__current_has_precedence() calls), because it's possible that some + * file in a directory below can be seen. + * + * \a other_paths is a \c NULL -terminated list of pathnames (which may + * have the \c "./" in front, ie. the \e normalized paths) that are to be + * reported at revision \a other_revs. + * + * If \a other_paths is \c NULL, or doesn't include an "." entry, + * the WC root is reported to be at \c current_url->current_rev or, if this + * is \c 0, to be at \a target, but empty. * */ -int cb__record_changes(svn_ra_session_t *session, - struct estat *root, +int cb__record_changes_mixed(struct estat *root, svn_revnum_t target, + char *other_paths[], svn_revnum_t other_revs, apr_pool_t *pool) { int status; svn_error_t *status_svn; void *report_baton; const svn_ra_reporter2_t *reporter; + int sent_wcroot; + char *cur, **op; status=0; cb___dest_rev=target; STOPIF_SVNERR( svn_ra_do_status, - (session, + (current_url->session, &reporter, &report_baton, "", @@ -725,26 +799,71 @@ root, pool) ); + sent_wcroot=0; + cur=NULL; + op=NULL; + if (other_paths) + { + op=other_paths; + while ( (cur=*op) ) + { + if (cur[0] == '.' && cur[1] == 0) + break; + op++; + } + } + /* If this is a checkout, we need to set the base directory at HEAD, but * empty. We cannot use the base at revision 0, because it probably didn't * exist there. */ - if (current_url->current_rev == 0) + if (cur) + STOPIF_SVNERR( reporter->set_path, + (report_baton, + "", other_revs, + FALSE, NULL, pool)); + else if (current_url->current_rev == 0) STOPIF_SVNERR( reporter->set_path, (report_baton, "", target, - TRUE, NULL, global_pool)); + TRUE, NULL, pool)); else STOPIF_SVNERR( reporter->set_path, (report_baton, "", current_url->current_rev, - FALSE, NULL, global_pool)); + FALSE, NULL, pool)); + + if (other_paths) + { + /* The root entry must be the first to be reported (because of + * subversion/libsvn_repos/reporter.c). + * So we have to loop through the list - in case the user does + * "fsvs diff file ." + * or something like that. */ + while ( (cur=*other_paths) ) + { + /* cur loops through the entries, but *op is still set. */ + if (op != other_paths) + { + DEBUGP("reporting %s@%llu", cur, (t_ull)other_revs); + + if (cur[0] == '.' 
&& cur[1] == PATH_SEPARATOR) + cur+=2; + + STOPIF_SVNERR( reporter->set_path, + (report_baton, cur, other_revs, FALSE, NULL, pool)); + } + + other_paths++; + } + } + DEBUGP("Getting changes from %llu to %llu", (t_ull)current_url->current_rev, (t_ull)target); - #if 0 +#if 0 STOPIF( cb___report_path_rev( root, reporter, report_baton, pool), NULL); - #endif +#endif STOPIF_SVNERR( reporter->finish_report, (report_baton, global_pool)); @@ -755,4 +874,3 @@ return status; } -/** @} */ diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/racallback.h /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/racallback.h --- fsvs-1.1.14/src/racallback.h 2008-02-19 05:57:01.000000000 +0000 +++ fsvs-1.1.17/src/racallback.h 2008-06-20 06:02:51.000000000 +0100 @@ -22,9 +22,25 @@ svn_error_t *cb__init(apr_pool_t *pool); /** A change-recording editor. */ -int cb__record_changes(svn_ra_session_t *session, - struct estat *root, +int cb__record_changes(struct estat *root, svn_revnum_t target, apr_pool_t *pool); +/** Like cb__record_changes(), but allowing mixed reporting. */ +int cb__record_changes_mixed(struct estat *root, + svn_revnum_t target, + char *other_paths[], svn_revnum_t other_revs, + apr_pool_t *pool); + + +/** This function adds a new entry below dir, setting it to + * \c FS_NEW or \c FS_REPLACED. */ +int cb__add_entry(struct estat *dir, + const char *utf8_path, char **loc_path, + const char *utf8_copy_path, + svn_revnum_t copy_rev, + int mode, + int *has_existed, + int may_create, + void **new); #endif diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/resolve.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/resolve.c --- fsvs-1.1.14/src/resolve.c 2008-03-25 06:20:11.000000000 +0000 +++ fsvs-1.1.17/src/resolve.c 2008-10-02 18:38:00.000000000 +0100 @@ -104,7 +104,7 @@ { /* We're not going recursively, so there's no need to process * sub-entries. */ - sts->entry_type=FT_IGNORE; + sts->to_be_ignored=1; } else { @@ -197,12 +197,12 @@ STOPIF( url__load_nonempty_list(NULL, 0), NULL); -/* Maybe we should add a flag saying that we don't want unknown entries, - * like it can easily happen with "fsvs resolve *". - * But then we'd get an error, and this is not so user-friendly like just - * ignoring these entries in res__action(). */ + /* Maybe we should add a flag saying that we don't want unknown entries, + * like it can easily happen with "fsvs resolve *". + * But then we'd get an error, and this is not so user-friendly like just + * ignoring these entries in res__action(). */ status=waa__read_or_build_tree(root, argc, normalized, argv, NULL, 1); - if (status == ENOENT) + if (status == -ENOENT) STOPIF(status, "!No data about current entries is available."); STOPIF(status, NULL); diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/revert.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/revert.c --- fsvs-1.1.14/src/revert.c 2008-03-25 06:20:11.000000000 +0000 +++ fsvs-1.1.17/src/revert.c 2008-10-29 07:18:35.000000000 +0000 @@ -20,7 +20,6 @@ #include "resolve.h" #include "checksum.h" #include "props.h" -#include "cache.h" #include "helper.h" #include "url.h" #include "update.h" @@ -51,7 +50,7 @@ * the copy source data. * - An unmodified direct copy destination entry, and other uncommitted * entries with special flags (manually added, or defined as copied), are - * changed back to "N"ew -- the copy definition and the special + * changed back to "N"ew -- the copy definition and the special * status is removed. 
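For reference, the reporter dance in cb__record_changes_mixed() above follows the svn_ra_reporter2_t protocol: the working-copy root must be reported first (subversion/libsvn_repos/reporter.c relies on that), further paths may then be reported at a different revision, and only finish_report() starts the actual editor drive. A trimmed sketch of that call sequence, assuming reporter and baton were already obtained, for example from svn_ra_do_status():

#include <svn_ra.h>
#include <svn_error.h>

/* Report the WC root at base_rev and one extra path at other_rev. */
static svn_error_t *report_mixed(const svn_ra_reporter2_t *reporter,
		void *baton,
		svn_revnum_t base_rev,
		const char *other_path, svn_revnum_t other_rev,
		apr_pool_t *pool)
{
	/* Root first - "" means the report anchor itself. */
	SVN_ERR( reporter->set_path(baton, "", base_rev,
				FALSE /* start_empty */, NULL /* lock token */, pool));

	if (other_path)
		SVN_ERR( reporter->set_path(baton, other_path, other_rev,
					FALSE, NULL, pool));

	/* Only now does the RA layer start driving the editor callbacks. */
	return reporter->finish_report(baton, pool);
}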
\n * Please note that on implicitly copied entries (entries that are marked * as copied because some parent directory is the base of a copy) \b @@ -118,141 +117,98 @@ * * \subsection rev_copied Working with copied entries * If an entry is marked as copied from another entry (and not committed!), - * a \c revert will undo the copy setting - which will make the entry - * unknown again, and reported as new on the next invocations. + * a \c revert will fetch the original copyfrom source. To undo the copy + * setting use the \ref uncp command. * - * If a directory structure was copied, and the current entry is just a - * implicitly copied entry, \c revert would take the copy source as - * reference, and get the file data from there. - * - * Summary: Only the base of a copy can be un-copied. * */ + +/** List of (bit-)flags for rev___undo_change(). + * These have an order, ie. SET_CURRENT overrides REVERT_MTIME. */ +enum rev___dir_change_flag_e { + NOT_CHANGED=0, + REVERT_MTIME=1, + SET_CURRENT=2, + GET_TSTAMP=0x1000, +}; + /** A count of files reverted in \b this run. */ static int number_reverted=0; static svn_revnum_t last_rev; +#define REV___GETFILE_MAX_CACHE (4) + + /** -. - * This function fetches a file from the current repository, and - * applies it locally. * - * Meta-data is set; an existing local entry gets atomically removed by \c - * rename(). - * The \c sts->entry_type \b must be correct. - * \c current_url \b must match, and be open. + * This function fetches an non-directory entry \a loc_url from the + * repository in \c current_url, and writes it to \a output - which gets + * closed via \c svn_stream_close(). * - * If \a only_tmp is not \c NULL, the temporary file is not renamed - * to the real name; instead its path is returned, in a buffer which must - * not be freed. There is a small number of such buffers used round-robin; - * at least two slots are always valid. \n - * No manber-hashes are done in this case. + * \a decoder should be set correctly. + * \todo if it's \c NULL, but an update-pipe is set on the entry, the data + * has to be read from disk again, to be correctly processed. * - * If \a url_to_use is not \c NULL, it is taken as source, and so must not - * be a directory. - */ -#define REV___GETFILE_MAX_CACHE (4) -int rev__get_file(struct estat *sts, - svn_revnum_t revision, - char *url_to_use, - svn_revnum_t *fetched, - char **only_tmp, + * No meta-data is set, and the \c svn:special attribute is ignored. + * + * The revision number must be valid, it may not be \c SVN_INVALID_REVNUM. + * + * If \a sts_for_manber is \c NULL, no manber hashes are calculated. + * + * If \a output_sts is \c NULL, the meta-data properties are kept in \a + * props; else its fields are filled (as far as possible) with data. That + * includes the estat::repos_rev field. + * + * The user-specified properties can be returned in \a props. 
+ * + * */ +int rev__get_text_to_stream( char *loc_url, svn_revnum_t revision, + const char *decoder, + svn_stream_t *output, + struct estat *sts_for_manber, + struct estat *output_sts, + apr_hash_t **props, apr_pool_t *pool) { int status; svn_error_t *status_svn; - - static struct cache_t *cache=NULL; - - char *filename_tmp; - char *utf8_path; - - char *filename; - apr_hash_t *props; - svn_stream_t *stream; - apr_file_t *a_stream; svn_string_t *prop_val; - apr_pool_t *subpool; - svn_stringbuf_t *target_stringbuf; - const char *encoder_str; struct encoder_t *encoder; - char *relative_to_url; + char *relative_url, *utf8_url; + apr_hash_t *properties; + char target_rev[10]; - STOPIF( cch__new_cache(&cache, 8), NULL); - target_stringbuf=NULL; encoder=NULL; - - BUG_ON(!pool); - - STOPIF( ops__build_path(&filename, sts), NULL); - /* We cannot give the filename as parameter to copy. - * We need a few bytes more for the unique part, and this additional - * length might not be addressible any more. */ - STOPIF( cch__add(cache, 0, NULL, sts->path_len+16, &filename_tmp), NULL); + status=0; + DEBUGP("getting file %s@%s from %s", loc_url, + hlp__rev_to_string(revision), current_url->url); - if (!url_to_use) + if (strncmp(loc_url, "./", 2) == 0) { /* Skip ./ in front. */ - relative_to_url=filename+2; - } - else - { - /* Verify that the correct URL is taken. */ - if (strncmp(current_url->url, url_to_use, current_url->urllen) == 0) - { - DEBUGP("%s matches current_url.", url_to_use); - } - else - { - STOPIF(EINVAL, "%s not below %s", url_to_use, current_url->url); - } - - relative_to_url=url_to_use + current_url->urllen+1; + relative_url=loc_url+2; } - - DEBUGP("getting file %s@%llu from %s", - relative_to_url, (t_ull)revision, current_url->url); - - - /* We could use a completely different mechanism for temp-file-names; - * but keeping it close to the target lets us see if we're out of - * disk space in this filesystem. (At least if it's not a binding mount - * or something similar - but then rename() should fail). - * If we wrote the data somewhere else, we'd risk moving it again - - * across filesystem boundaries. */ - strcpy(filename_tmp, filename); - strcat(filename_tmp, ".XXXXXX"); - - - STOPIF( apr_pool_create(&subpool, pool), - "Creating the filehandle pool"); - - if (sts->entry_type == FT_FILE) - { - /* Files get written in files. */ - STOPIF( apr_file_mktemp(&a_stream, filename_tmp, - APR_WRITE | APR_CREATE | APR_EXCL | APR_READ | APR_TRUNCATE, - subpool), - "Cannot open/create file %s", filename_tmp); - stream=svn_stream_from_aprfile(a_stream, subpool); - } else { - /* Special entries are typically smaller than 1kByte, and get stored in - * an in-memory buffer. */ - target_stringbuf=svn_stringbuf_create("", subpool); + /* It could be an absolute value. */ - stream=svn_stream_from_stringbuf(target_stringbuf, subpool); + /* Verify that the correct URL is taken. + * The "/" that's not stored at the end of the URL must be there, too. + * */ + if (strncmp(current_url->url, loc_url, current_url->urllen) == 0 && + loc_url[current_url->urllen] == '/') + loc_url += current_url->urllen+1; + + /* If the string doesn't match, it better be a relative value already + * ... else we'll get an error. */ + // else STOPIF(EINVAL, "%s not below %s", loc_url, current_url->url); } - - /* When we get a file, every old manber-hashes are stale. - * So remove them; if the file is big enough, we'll recreate it with - * correct data. 
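At its core rev__get_text_to_stream() is one svn_ra_get_file() call that pours the text into whatever svn_stream_t it was handed (possibly wrapped by the manber and decode filters first) and returns the property hash on the side; svn_ra_get_file() does not close the stream, so the caller has to. A bare-bones version of just that step, assuming an already opened session and a ready-made output stream:

#include <apr_hash.h>
#include <svn_ra.h>
#include <svn_io.h>
#include <svn_error.h>

/* Fetch repos_relpath@revision into *out; also hand back the revision
 * actually fetched and the entry's properties. */
static svn_error_t *fetch_to_stream(svn_ra_session_t *session,
		const char *repos_relpath, svn_revnum_t revision,
		svn_stream_t *out,
		svn_revnum_t *fetched, apr_hash_t **props,
		apr_pool_t *pool)
{
	SVN_ERR( svn_ra_get_file(session, repos_relpath, revision,
				out, fetched, props, pool));

	/* svn_ra_get_file() leaves the stream open - mirror the
	 * svn_stream_close() done in the hunk above. */
	return svn_stream_close(out);
}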
*/ - STOPIF( waa__delete_byext(filename, WAA__FILE_MD5s_EXT, 1), NULL); + STOPIF( hlp__local2utf8(loc_url, &utf8_url, -1), NULL); /* Symlinks have a MD5, too ... so just do that here. */ @@ -262,13 +218,9 @@ * We need to get the MD5 anyway; there's svn_stream_checksummed(), * but that's just one chainlink more, and so we simply use our own * function. */ - if (!action->is_import_export && !only_tmp) - STOPIF( cs__new_manber_filter(sts, stream, &stream, - subpool), - NULL); - - /* We need to skip the ./ in front. */ - STOPIF( hlp__local2utf8(relative_to_url, &utf8_path, -1), NULL); + if (sts_for_manber) + STOPIF( cs__new_manber_filter(sts_for_manber, + output, &output, pool), NULL); /* If there's a fsvs:update-pipe, we would know when we have the file @@ -299,93 +251,282 @@ * use network, or use disk. * For a local-remote diff we could pipe the data into the diff program; * but that wouldn't work for remote-remote diffing, as diff(1) doesn't - * accept arbitrary filehandles as input. - **/ - if (sts->decoder_is_correct) - { - encoder_str=sts->decoder; - /* \todo: are there cases where we haven't seen the properties? - * Call up__fetch_encoder_str()? Should know whether the value is valid, or - * just not set yet. */ - } - else + * accept arbitrary filehandles as input (and /proc/self/fd/ isn't + * portable). */ + + /* Fetch decoder from repository. */ + if (decoder == DECODER_UNKNOWN) { - /* Get the correct value for the update-pipe. */ - STOPIF_SVNERR( svn_ra_get_file, + STOPIF_SVNERR_TEXT( svn_ra_get_file, (current_url->session, - utf8_path, - revision, - NULL, + loc_url, revision, NULL, - &props, - subpool) ); + &revision, &properties, + pool), + "Fetching entry \"%s/%s\"@%s", + current_url->url, + loc_url, hlp__rev_to_string(revision)); - prop_val=apr_hash_get(props, propval_updatepipe, APR_HASH_KEY_STRING); - encoder_str=prop_val ? prop_val->data : NULL; + prop_val=apr_hash_get(properties, propval_updatepipe, APR_HASH_KEY_STRING); + decoder=prop_val ? prop_val->data : NULL; } - DEBUGP("updatepipe found as %s", encoder_str); + /* First decode, then do manber-hashing. As the filters are prepended, we * have to do that after the manber-filter. */ - if (encoder_str) + if (decoder) { - STOPIF( hlp__encode_filter(stream, encoder_str, 1, - &stream, &encoder, subpool), NULL); - encoder->output_md5= &(sts->md5); + snprintf(target_rev, sizeof(target_rev), + "%llu", (t_ull)revision); + setenv(FSVS_EXP_TARGET_REVISION, target_rev, 1); + + STOPIF( hlp__encode_filter(output, decoder, 1, + loc_url, &output, &encoder, pool), NULL); + if (output_sts) + encoder->output_md5= &(output_sts->md5); } - if (!fetched) fetched=&revision; - /* \a fetched only gets set for \c SVN_INVALID_REVNUM , so set a default. */ - *fetched=revision; - STOPIF_SVNERR( svn_ra_get_file, + STOPIF_SVNERR_TEXT( svn_ra_get_file, (current_url->session, - utf8_path, - revision, - stream, - fetched, - &props, - subpool) ); - DEBUGP("got revision %llu", (t_ull)*fetched); + loc_url, revision, + output, + &revision, &properties, + pool), + "Fetching entry %s/%s@%s", + current_url->url, + loc_url, hlp__rev_to_string(revision)); + DEBUGP("got revision %llu", (t_ull)revision); + + /* svn_ra_get_file doesn't close the stream. */ + STOPIF_SVNERR( svn_stream_close, (output)); + output=NULL; + + if (output_sts) + { + output_sts->repos_rev = revision; + STOPIF( prp__set_from_aprhash( output_sts, properties, + ONLY_KEEP_USERDEF, pool), NULL); + } + + if (props) + *props=properties; + + +ex: + return status; +} + + +/** -. 
+ * Mostly the same as \c rev__get_text_to_stream(), but returning a + * (temporary) \a filename based on \a filename_base, if this is not \c + * NULL. + * + * If \a filename_base is \c NULL, the file will be put in a real temporary + * location. + * + * \a output_stat is used to store the parsed properties of the entry. + * */ +int rev__get_text_to_tmpfile(char *loc_url, svn_revnum_t revision, + char *encoder, + char *filename_base, char **filename, + struct estat *sts_for_manber, + struct estat *output_sts, apr_hash_t **props, + apr_pool_t *pool) +{ + int status; + apr_file_t *apr_f; + svn_stream_t *output; + + + status=0; + + STOPIF( waa__get_tmp_name( filename_base, filename, &apr_f, pool), NULL); + output=svn_stream_from_aprfile(apr_f, pool); + + STOPIF( rev__get_text_to_stream( loc_url, revision, encoder, + output, sts_for_manber, output_sts, props, pool), NULL); /* svn_ra_get_file() doesn't close. */ - STOPIF_SVNERR( svn_stream_close, (stream)); + STOPIF( apr_file_close(apr_f), NULL); + +ex: + return status; +} + + +/** -. + * + * Does no validation of input - might fill entire memory. */ +int rev__get_text_into_buffer(char *loc_url, svn_revnum_t revision, + const char *decoder, + svn_stringbuf_t **output, + struct estat *sts_for_manber, + struct estat *output_sts, + apr_hash_t **props, + apr_pool_t *pool) +{ + int status; + svn_stringbuf_t *string; + svn_stream_t *stream; + + status=0; + string=svn_stringbuf_create("", pool); + stream=svn_stream_from_stringbuf(string, pool); + + STOPIF( rev__get_text_to_stream(loc_url, revision, + decoder, stream, sts_for_manber, output_sts, props, pool), NULL); + + *output=string; +ex: + return status; +} + + +/** -. + * + * Meta-data is set; an existing local entry gets atomically removed by \c + * rename(). + * + * If the entry has no URL defined yet, but has a copy flag set (\c + * RF_COPY_BASE or \c RF_COPY_SUB), this URL is taken. + * + * If \a revision is 0, the \c BASE revision is and \a decoder is used; + * this is the copy base for copied entries. + */ +int rev__install_file(struct estat *sts, svn_revnum_t revision, + char *decoder, + apr_pool_t *pool) +{ + int status; + char *filename; + char *filename_tmp; + apr_hash_t *props; + svn_stream_t *stream; + apr_file_t *a_stream; + apr_pool_t *subpool; + char *special_data; + char *url; + svn_revnum_t rev_to_take; + + + BUG_ON(!pool); + STOPIF( ops__build_path(&filename, sts), NULL); + + + /* We know that we have to do something here; but because the order is + * depth-first, the parent directory isn't done yet (and shouldn't be, + * because it needs permissions and mtime set!). + * So it's possible that the target directory doesn't yet exist. + * + * Note: because we're here for *non-dir* entries, we always have a + * parent. */ + STOPIF( waa__mkdir(filename, 0), NULL); + + + STOPIF( apr_pool_create(&subpool, pool), + "Creating the filehandle pool"); + + + /* When we get a file, old manber-hashes are stale. + * So remove them; if the file is big enough, we'll recreate it with + * correct data. */ + STOPIF( waa__delete_byext(filename, WAA__FILE_MD5s_EXT, 1), NULL); + + + /* Files get written in files; we use the temporarily generated name for + * special entries, too. */ + /* We could use a completely different mechanism for temp-file-names; + * but keeping it close to the target lets us see if we're out of + * disk space in this filesystem. (At least if it's not a binding mount + * or something similar - but then rename() should fail). 
+ * If we wrote the data somewhere else, we'd risk moving it again, across + * filesystem boundaries. */ + STOPIF( waa__get_tmp_name( filename, &filename_tmp, &a_stream, subpool), + NULL); + + + /* It's a bit easier to just take the (small) performance hit, and always + * (temporarily) write the data in a file. + * If it's a special entry, that will just get read immediately back and + * changed to the correct type. + * + * It doesn't really make much difference, as the file is always created + * to get a distinct name. */ + stream=svn_stream_from_aprfile(a_stream, subpool); + + + if (sts->url) + { + STOPIF( hlp__local2utf8( filename+2, &url, -1), NULL); + rev_to_take=sts->repos_rev; + current_url=sts->url; + } + else if (sts->flags & RF___IS_COPY) + { + STOPIF( cm__get_source( sts, filename, &url, &rev_to_take, 0), NULL); + STOPIF( url__find( url, ¤t_url), NULL); + STOPIF( hlp__local2utf8( url, &url, -1), NULL); + } + else + BUG("cannot get file %s", filename); + + if (revision == 0) + { + /* BASE wanted; get decoder. */ + STOPIF( up__fetch_decoder(sts), NULL); + decoder=sts->decoder; + } + else + { + /* Arbitrary revision - get decoder. */ + rev_to_take=revision; + decoder=DECODER_UNKNOWN; + } + + + STOPIF( url__open_session(NULL), NULL); + + /* We don't give an estat for meta-data parsing, because we have to loop + * through the property list anyway - for storing locally. */ + STOPIF( rev__get_text_to_stream( url, rev_to_take, decoder, + stream, sts, NULL, &props, pool), NULL); + - if (sts->entry_type != FT_FILE) + if (apr_hash_get(props, propname_special, APR_HASH_KEY_STRING)) { - target_stringbuf->data[ target_stringbuf->len ]=0; - DEBUGP("got special value %s", target_stringbuf->data); + STOPIF( ops__read_special_entry( a_stream, &special_data, + 0, NULL, filename_tmp, subpool), NULL); + + /* The correct type gets set on parsing. */ STOPIF( up__handle_special(sts, filename_tmp, - target_stringbuf->data, subpool), NULL); + special_data, subpool), NULL); } - STOPIF( prp__set_from_aprhash(sts, props, subpool), NULL); + STOPIF( prp__set_from_aprhash(sts, props, STORE_IN_FS, subpool), NULL); /* We write all meta-data. If we got no values from the repository, we just * write what we have in the local filesystem back - the temporary file has * just some default values, after all. */ - sts->entry_status |= FS_META_CHANGED; + sts->remote_status |= FS_META_CHANGED; DEBUGP("setting meta-data"); STOPIF( up__set_meta_data(sts, filename_tmp), NULL); - if (only_tmp) - { - DEBUGP("returning temporary file %s", filename_tmp); - *only_tmp=filename_tmp; - } - else - { - DEBUGP("rename to %s", filename); - /* rename to correct filename */ - STOPIF_CODE_ERR( rename(filename_tmp, filename)==-1, errno, - "Cannot rename '%s' to '%s'", filename_tmp, filename); - - /* The rename changes the ctime. */ - STOPIF( hlp__lstat( filename, &(sts->st)), - "Cannot lstat('%s')", filename); - } + STOPIF( apr_file_close(a_stream), NULL); + + + DEBUGP("rename to %s", filename); + /* rename to correct filename */ + STOPIF_CODE_ERR( rename(filename_tmp, filename)==-1, errno, + "Cannot rename '%s' to '%s'", filename_tmp, filename); + + /* The rename changes the ctime. */ + STOPIF( hlp__lstat( filename, &(sts->st)), + "Cannot lstat('%s')", filename); + sts->url=current_url; /* We have to re-sort the parent directory, as the inode has changed @@ -395,6 +536,7 @@ apr_pool_destroy(subpool); subpool=NULL; + ex: /* On error remove the temporary file. */ /* Return the original error. 
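rev__install_file() deliberately puts its temporary file right next to the final name: the data is written under a throw-away name, meta-data is applied to it, and a single rename() flips it into place atomically - which only works reliably because source and target stay on the same filesystem. The bare POSIX pattern, without the waa__* helpers (mkstemp() creates the file with mode 0600; the real code sets the proper meta-data before renaming):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>

/* Write `data` to `target` by going through "<target>.XXXXXX". */
static int install_atomically(const char *target, const char *data)
{
	char tmp[4096];
	int fd, ret=0;

	snprintf(tmp, sizeof(tmp), "%s.XXXXXX", target);
	fd=mkstemp(tmp);
	if (fd == -1) return errno;

	if (write(fd, data, strlen(data)) == -1) ret=errno;
	if (close(fd) == -1 && !ret) ret=errno;

	/* Atomic replace; only safe because tmp is in the same directory
	 * (and therefore on the same filesystem) as target. */
	if (!ret && rename(tmp, target) == -1) ret=errno;

	if (ret) unlink(tmp);	/* on error, drop the temporary file */
	return ret;
}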
*/ @@ -449,10 +591,12 @@ /* No need to close hdl -- it's opened only for that process, and will * be closed when it exec()s. */ + /* Remove the ./ at the front */ + setenv(FSVS_EXP_CURR_ENTRY, output+2, 1); + STOPIF_CODE_ERR( execlp( opt__get_string(OPT__MERGE_PRG), opt__get_string(OPT__MERGE_PRG), opt__get_string(OPT__MERGE_OPT), - "-p", file1, common, file2, NULL) == -1, errno, "Starting the merge program \"%s\" failed", @@ -537,7 +681,65 @@ NULL, &props, pool) ); - STOPIF( prp__set_from_aprhash(sts, props, pool), NULL); + STOPIF( prp__set_from_aprhash(sts, props, STORE_IN_FS, pool), NULL); + +ex: + return status; +} + + +/** Set, reset or fetch the mtime of a directory. + * */ +int rev___handle_dir_mtime(struct estat *dir, + enum rev___dir_change_flag_e dir_flag) +{ + int status; + char *path; + + + /* Now, after all has been said and done for the children, set and re-get + * the actual meta-data - the mtime has been changed in the meantime + * (because of child node creation), and maybe this filesystem's + * granularity is worse than on commit; then the timestamps would be + * wrong. */ + status=0; + DEBUGP("dir_flag says %X", dir_flag); + + + if (dir_flag & SET_CURRENT) goto a; + if (dir_flag & REVERT_MTIME) goto b; + if (dir->remote_status & FS_META_CHANGED) goto c; + if (dir_flag & GET_TSTAMP) goto d; + /* Is there some better syntax? Some kind of switch with + * case-expressions? + * + * I had a lot of if () { } with partially-overlapping conditions: + * if (dir->flag & x) A(); + * if (dir->flag & (x|y)) B(); + * but gcc wouldn't simply emit a "jmp" to B() after the A() - and I + * couldn't easily see that the statements were accumulative. + * */ + goto x; + +a: + /* If there's an intentional change (like merging), the current time is + * taken. */ + time( & dir->st.mtim.tv_sec ); +b: + /* Make sure that the value is written back to the filesystem.*/ + dir->remote_status |= FS_META_MTIME; +c: + STOPIF( up__set_meta_data(dir, NULL), NULL); +d: + /* ops__update_single_entry() would trash the entry_status field! */ + STOPIF( ops__build_path(&path, dir), NULL); + STOPIF( hlp__lstat(path, &dir->st), NULL); + + /* If it had changes, we'll have to check next time. */ + if (dir->entry_status & FS_CHANGED) + dir->flags |= RF_CHECK; + +x: ex: return status; @@ -556,13 +758,18 @@ * finishing the report, and may not perform any RA operations using * @a session from within the editing operations of @a update_editor. * \endcode + * + * We may not change \c sts->entry_status - the caller still needs it; and + * as this is a revert to \c BASE, we must not modify the entry list + * either. * */ -int rev__action(struct estat *sts) +int rev___revert_to_base(struct estat *sts, + enum rev___dir_change_flag_e *dir_change_flag, + apr_pool_t *pool) { int status; - svn_revnum_t fetched, wanted; + svn_revnum_t wanted; struct estat copy; - char *url_to_fetch; char *path; @@ -579,21 +786,10 @@ sts->flags &= ~RF_UNVERSION; DEBUGP("removing unversion on %s", path); } - else if ( (sts->flags & RF_COPY_BASE) && - !(sts->entry_status & (FS_META_CHANGED | FS_CHANGED) ) ) - { - /* Directly copied, unchanged entry. - * Make it unknown - remove copy relation (ie. mark hash value for - * deletion), and remove entry from local list. */ - STOPIF( cm__get_source(sts, path, NULL, NULL, 1), NULL); - sts->flags &= ~RF_COPY_BASE; - sts->entry_type = FT_IGNORE; - DEBUGP("unchanged copy, reverting %s", path); - } else if ( sts->flags & RF_ADD ) { /* Added entry just gets un-added ... ie. unknown. 
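rev___handle_dir_mtime() above encodes "each level implies everything below it" with a ladder of goto labels, and its comment openly wonders whether there is a nicer way to write that. One possible alternative with the same ordering - shown purely for illustration, not as a proposed change - lets every condition accumulate into the next:

#include <stdio.h>

enum { REVERT_MTIME=1, SET_CURRENT=2, GET_TSTAMP=0x1000 };

/* Same semantics as the a:/b:/c:/d: labels above, written with
 * cumulative booleans instead of gotos. */
static void handle_dir_mtime_alt(int dir_flag, int meta_changed)
{
	int set_current  = dir_flag & SET_CURRENT;
	int revert_mtime = set_current  || (dir_flag & REVERT_MTIME);
	int write_meta   = revert_mtime || meta_changed;
	int get_tstamp   = write_meta   || (dir_flag & GET_TSTAMP);

	if (set_current)  puts("take current time");
	if (revert_mtime) puts("mark mtime for writing");
	if (write_meta)   puts("write meta-data");
	if (get_tstamp)   puts("re-stat the directory");
}

int main(void)
{
	handle_dir_mtime_alt(SET_CURRENT, 0);	/* runs all four steps */
	handle_dir_mtime_alt(GET_TSTAMP, 0);	/* only re-stats       */
	return 0;
}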
*/ - sts->entry_type = FT_IGNORE; + sts->to_be_ignored=1; DEBUGP("removing add-flag on %s", path); } else if (!( (sts->flags & (RF_COPY_BASE | RF_COPY_SUB)) || sts->url ) ) @@ -612,7 +808,10 @@ DEBUGP("have an URL for %s", path); if ( sts->flags & RF_CONFLICT ) + { + *dir_change_flag |= REVERT_MTIME; STOPIF( res__remove_aux_files(sts), NULL); + } /* If not seen as changed, but target is BASE, we don't need to do * anything. */ @@ -629,24 +828,6 @@ * Print the current revision. */ - url_to_fetch=NULL; - if (sts->flags & RF___IS_COPY) - { - /* No URL yet, but we can reconstruct it. */ - /* We have to use the copyfrom */ - STOPIF( cm__get_source(sts, NULL, &url_to_fetch, &wanted, 0), NULL); - - /* \TODO: That doesn't work for unknown URLs. */ - STOPIF( url__find(url_to_fetch, ¤t_url), NULL); - } - else - { - STOPIF( !sts->url, - "The entry '%s' has no URL associated.", path); - current_url = sts->url; - } - - if (sts->parent && (!number_reverted || last_rev != wanted)) { printf("Reverting to revision %s:\n", hlp__rev_to_string(wanted)); @@ -659,12 +840,8 @@ copy=*sts; /* Parent directories might just have been created. */ - if (sts->entry_type & FT_NONDIR) + if (!S_ISDIR(sts->st.mode)) { - /* We cannot give connection errors before stat()ing many thousand - * files, because we do not know which URL to open -- until here. */ - STOPIF( url__open_session(NULL), NULL); - DEBUGP("file was changed, reverting"); /* \todo It would be nice if we could solve meta-data *for the current @@ -674,9 +851,9 @@ * data, reset rights. * */ /* TODO - opt_target_revision ? */ - STOPIF( rev__get_file(sts, wanted, url_to_fetch, - &fetched, NULL, current_url->pool), + STOPIF( rev__install_file(sts, wanted, sts->decoder, pool), "Unable to revert entry '%s'", path); + *dir_change_flag |= REVERT_MTIME; } else { @@ -685,77 +862,138 @@ status = (mkdir(path, sts->st.mode & 07777) == -1) ? errno : 0; DEBUGP("mkdir(%s) says %d", path, status); STOPIF(status, "Cannot create directory '%s'", path); + *dir_change_flag |= REVERT_MTIME; + + /* As we just created the directory, we need *all* meta-data reset. + * */ + sts->remote_status |= FS_META_CHANGED; + } + else + { + /* Code for directories. + * As the children are handled by the recursive options and by \a + * ops__set_to_handle_bits(), we only have to restore the directories' + * meta-data here. */ + /* up__set_meta_data() checks remote_status, while we here have + * entry_status set. */ + sts->remote_status=sts->entry_status; } - /* Code for directories. - * As the children are handled by the recursive options and by \a - * ops__set_to_handle_bits(), we only have to restore the directories' - * meta-data here. */ - /* up__set_meta_data() checks remote_status, while we here have - * entry_status set. */ - sts->remote_status=sts->entry_status; STOPIF( up__set_meta_data(sts, NULL), NULL); if (sts->entry_status) sts->flags |= RF_CHECK; } - - - /* We do not allow mixed revision working copies - we'd have to store an - * arbitrary number of revision/URL pairs for directories. - * See cb__record_changes(). - * - * What we *do* allow is to switch entries' *data* to other revisions; but - * for that we store the old values of BASE, so that the file is displayed - * as modified. - * An revert without "-r" will restore BASE, and a commit will send the - * current (old :-) data. - * - * But we show that it's modified. 
*/ - if (opt_target_revisions_given) - { - copy.entry_status = ops__stat_to_action(©, & sts->st); - if (sts->entry_type != FT_DIR && copy.entry_type != FT_DIR) - /* Same MD5? => Same file. Not fully true, though. */ - if (memcmp(sts->md5, copy.md5, sizeof(copy.md5)) == 0) - copy.entry_status &= ~FS_LIKELY; - *sts=copy; - } - else - { - /* There's no change anymore, we're at BASE. - * But just printing "...." makes no sense ... show the old status. */ - } } - /* And if it was chosen directly, it should be printed, even if we have - * the "old" revision. */ + /* There's no change anymore, we're at BASE. + * But just printing "...." makes no sense ... show the old status. */ sts->flags |= RF_PRINT; - STOPIF( st__status(sts), NULL); +ex: + return status; +} + + +/** Reset local changes. */ +int rev___no_local_change(struct estat *sts) +{ + sts->entry_status=0; + return st__progress(sts); +} + +/** -. + * Recurses for rev___revert_to_base. + * + * There's a bit of uglyness here, regarding deleted directories ... + * + * 1) If we do the tree depth-first, we have to build multiple levels of + * directories at once - and store which have to have their meta-data + * reset. + * + * 2) If we do level after level, we might end up with either + * a) re-creating a directory, doing its children, then have to re-set the + * meta-data of this directory, or + * b) just store that the meta-data has to be done for later. + * + * Currently we do 2a - that seems the simplest, and has no big performance + * penalty. */ +int rev___local_revert(struct estat *dir, + apr_pool_t *pool) +{ + int status; + int i, do_undo; + struct estat *sts; + apr_pool_t *subpool; + enum rev___dir_change_flag_e dir_flag; + + + status=0; + subpool=NULL; + dir_flag= NOT_CHANGED; - /* Entries that are now ignored don't matter for the parent directory; - * but if we really changed something in the filesystem, we have to - * update the parent status. */ - if (sts->entry_type != FT_IGNORE) - { - /* For files we have no changes anymore. - * Removing the FS_REMOVED flag for directories means that the children - * will be loaded, and we get called again after they're done :-) - * See also actionlist_t::local_callback. */ - sts->entry_status=0; - - /* For directories we set that we still have meta-data to do - children - * might change our mtime. */ - if (S_ISDIR(sts->st.mode)) - sts->entry_status=FS_META_MTIME; - - /* Furthermore the parent should be re-stat()ed after the children have - * finished. */ - if (sts->parent) - sts->parent->entry_status |= FS_META_MTIME; + for(i=0; ientry_count; i++) + { + sts=dir->by_inode[i]; + STOPIF( apr_pool_create(&subpool, pool), "Cannot get a subpool"); + + do_undo = sts->do_this_entry && + (sts->entry_status & FS__CHANGE_MASK) && + ops__allowed_by_filter(sts); + + DEBUGP("on %s: do_undo=%d, st=%s", sts->name, do_undo, + st__status_string_fromint(sts->entry_status)); + + if (do_undo) + STOPIF( rev___revert_to_base(sts, &dir_flag, subpool), NULL); + + if (S_ISDIR(sts->st.mode) && + (sts->entry_status & FS_CHILD_CHANGED)) + STOPIF( rev___local_revert(sts, subpool), NULL); + + if (do_undo) + STOPIF( st__status(sts), NULL); + + + apr_pool_destroy(subpool); + subpool=NULL; } + /* We cannot free the memory earlier - the data is needed for the status + * output and recursion. */ + STOPIF( ops__free_marked(dir, 0), NULL); + + /* The root entry would not be printed; do that here. 
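rev___local_revert() above walks the tree depth-first and gives each child its own APR subpool that is destroyed as soon as the child is finished, so temporary allocations do not pile up over a large working copy. The skeleton of that per-child subpool pattern, stripped of the fsvs specifics:

#include <apr_general.h>
#include <apr_pools.h>

struct node { struct node **children; int child_count; };

/* Visit every node; each child gets a short-lived subpool for its
 * temporary allocations, as in rev___local_revert() above. */
static void walk(struct node *dir, apr_pool_t *pool)
{
	apr_pool_t *subpool;
	int i;

	for (i=0; i<dir->child_count; i++)
	{
		apr_pool_create(&subpool, pool);

		/* ... process dir->children[i] using subpool ... */
		if (dir->children[i]->child_count)
			walk(dir->children[i], subpool);

		apr_pool_destroy(subpool);
	}
}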
*/ + if (!dir->parent) + STOPIF( st__status(dir), NULL); + + + STOPIF( rev___handle_dir_mtime(dir, dir_flag), NULL); + +ex: + return status; +} + + +/** Copy local changeflags estat::entry_status to estat::remote_status. + * + * This makes rev__do_changed() undo the local changes. + * + * Directories above need the FS_CHILD_CHANGED flag; if we'd change the + * filter to run through all entries we'd do too many (ignoring the list + * given on the command line). + * At least we'd be in the right order - because every sub-entry gets done + * before the parents. */ +int rev___copy_changeflags(struct estat *sts) +{ + int status; + + sts->remote_status=sts->entry_status; + sts->entry_status=0; + ops__mark_parent_cc(sts, remote_status); + + STOPIF( st__progress(sts), NULL); + ex: return status; } @@ -769,6 +1007,7 @@ int status; char **normalized; time_t delay_start; + svn_revnum_t rev; status=0; @@ -779,16 +1018,24 @@ STOPIF( waa__find_common_base(argc, argv, &normalized), NULL); - STOPIF( url__load_nonempty_list(NULL, 0), NULL); - if (opt_target_revisions_given) + { STOPIF( wa__warn( WRN__MIXED_REV_WC, EINVAL, "Sorry, fsvs currently doesn't allow mixed revision working copies.\n" "Entries will still be compared against the BASE revision.\n"), NULL); + // TODO: necessary? + action->local_callback=rev___no_local_change; + } + else + { + /* No revision given - just go back to BASE. */ + action->local_callback=st__progress; + } + /* This message can be seen because waa__find_common_base() looks for * an "url" file and not for a "dir" -- which means that this tree @@ -807,276 +1054,316 @@ else STOPIF(status, NULL); + STOPIF( st__progress_uninit(), NULL); + + if (opt_target_revisions_given) + { + while ( ! ( status=url__iterator(&rev) ) ) + { + STOPIF( cb__record_changes(root, rev, current_url->pool), NULL); + } + STOPIF_CODE_ERR( status != EOF, status, NULL); + + STOPIF( rev__do_changed(root, global_pool), NULL); + } + else + { + /* The local changes are taken as to be undone. + * + * We cannot go by estat::entry_status - things like RF_ADD have to be + * undone, too. + * + * waa__do_sorted_tree() can't be used, either, because it does the + * directory *before* the children - which makes the directories' mtime + * wrong if children get created or deleted. */ + STOPIF( rev___local_revert(root, global_pool), NULL); + } + only_check_status=0; - delay_start=time(NULL); - STOPIF( waa__output_tree(root), NULL); - STOPIF( hlp__delay(delay_start, DELAY_REVERT), NULL); + + /* If this was a revert with destination revision, we might have changed + * the entire hierarchy - replaced directories with files, etc. + * This changed tree must not be written, because it's not the state of + * BASE. + * [ And if we had to write the original (BASE) list for some cause, + * we'd have to read the list afresh, and change what we have to. + * Or, the other way: when getting the changes for the given revision + * from the repository we'd have to put them in the estat::old shadow + * tree, to keep the entry list correct. ] + * + * If this was a revert to BASE, we have to write the list, because the + * ctime of the inodes will be changed - and would mark the entries as + * "maybe changed". */ + if (!opt_target_revisions_given) + { + delay_start=time(NULL); + STOPIF( waa__output_tree(root), NULL); + STOPIF( hlp__delay(delay_start, DELAY_REVERT), NULL); + } ex: return status; } -/** -. - * Used on update. */ -int rev__do_changed(svn_ra_session_t *session, - struct estat *dir, +/** Convenience function to reduce indenting. 
*/ +int rev___undo_change(struct estat *sts, + enum rev___dir_change_flag_e *dir_change_flag, apr_pool_t *pool) { int status; - int i, j; - struct estat *sts; char *fn; - struct sstat_t st; - apr_pool_t *subpool; const char *unique_name_mine, *unique_name_remote, *unique_name_common; char revnum[12]; + int j; + struct estat *removed; + struct sstat_t st; - status=0; - subpool=NULL; - for(i=0; ientry_count; i++) - { - sts=dir->by_inode[i]; + STOPIF( ops__build_path( &fn, sts), NULL); + DEBUGP("%s has changed: mode=0%o, r=%X(%s), l=%X(%s)", + fn, sts->st.mode, + sts->remote_status, st__status_string_fromint(sts->remote_status), + sts->entry_status, st__status_string(sts)); - STOPIF( apr_pool_create(&subpool, pool), "Cannot get a subpool"); + /* If we remove an entry, the entry_count gets decremented; + * we have to repeat the loop *for the same index*. */ - if (sts->remote_status & FS__CHANGE_MASK) + + unique_name_mine=NULL; + + /* Conflict handling; depends whether it has changed locally. */ + if (sts->entry_status & FS_CHANGED) + switch (opt__get_int(OPT__CONFLICT)) { - STOPIF( ops__build_path( &fn, sts), NULL); - DEBUGP("%s has changed: %X, mode=0%o (local %X - %s)", - fn, sts->remote_status, sts->st.mode, sts->entry_status, - st__status_string(sts)); - - /* If we remove an entry, the entry_count gets decremented; - * we have to repeat the loop *for the same index*. */ - - - unique_name_mine=NULL; - - /* Conflict handling; depends whether it has changed locally. */ - if (sts->entry_status & (FS_CHANGED | FS_CHILD_CHANGED)) - switch (opt__get_int(OPT__CONFLICT)) - { - case CONFLICT_STOP: - STOPIF( EBUSY, "!The entry %s has changed locally", fn); - break; - - case CONFLICT_LOCAL: - /* Next one, please. */ - printf("Conflict for %s skipped.\n", fn); - continue; - - case CONFLICT_REMOTE: - /* Just ignore local changes. */ - break; - - case CONFLICT_MERGE: - case CONFLICT_BOTH: - /* Rename local file to something like .mine. */ - STOPIF( hlp__rename_to_unique(fn, ".mine", - &unique_name_mine, pool), NULL); - /* Now the local name is not used ... so get the file. */ - break; - - default: - BUG("unknown conflict resolution"); - } - - - /* If the entry has been removed in the repository, we remove it - * locally, too (if it wasn't changed). - * But the type in the repository may be another than the local one - - * so we have to check what we currently have. */ - /* An entry can be given as removed, and in the same step be created - * again - possibly as another type. */ - if (sts->remote_status & FS_REMOVED) + case CONFLICT_STOP: + STOPIF( EBUSY, "!The entry %s has changed locally", fn); + break; + + case CONFLICT_LOCAL: + /* Next one, please. */ + STOPIF_CODE_EPIPE( printf("Conflict for %s skipped.\n", fn), NULL); + goto ex; + + case CONFLICT_REMOTE: + /* Just ignore local changes. */ + break; + + case CONFLICT_MERGE: + case CONFLICT_BOTH: + /* Rename local file to something like .mine. */ + STOPIF( hlp__rename_to_unique(fn, ".mine", + &unique_name_mine, pool), NULL); + /* Now the local name is not used ... so get the file. */ + break; + + default: + BUG("unknown conflict resolution"); + } + + + /* If the entry has been removed in the repository, we remove it + * locally, too (if it wasn't changed). + * But the type in the repository may be another than the local one - + * so we have to check what we currently have. */ + /* An entry can be given as removed, and in the same step be created + * again - possibly as another type. 
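With CONFLICT_MERGE or CONFLICT_BOTH the code above first moves the locally modified file aside under a ".mine" suffix, later stores the incoming text under ".rNNN", and only then merges or leaves the variants side by side. A simplified stand-in for that rename-to-unique step (the real hlp__rename_to_unique() lives elsewhere in fsvs and is more careful; this is only a sketch):

#include <stdio.h>
#include <unistd.h>
#include <errno.h>

/* Move `path` to "<path><suffix>", or "<path><suffix>.1", ".2", ...
 * until an unused name is found; the chosen name ends up in `out`.
 * link()+unlink() is used because link() refuses to overwrite. */
static int rename_to_unique(const char *path, const char *suffix,
		char *out, size_t outlen)
{
	int i;

	for (i=0; i<1000; i++)
	{
		if (i == 0)
			snprintf(out, outlen, "%s%s", path, suffix);
		else
			snprintf(out, outlen, "%s%s.%d", path, suffix, i);

		if (link(path, out) == 0)
			return unlink(path) == -1 ? errno : 0;
		if (errno != EEXIST)
			return errno;
	}
	return EEXIST;
}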
*/ + + /* If the entry wasn't replaced, but only removed, there's no + * sts->old. */ + removed=sts->old ? sts->old : sts; + if (removed->remote_status & FS_REMOVED) + { + DEBUGP("old entry removed"); + + /* Is the entry already removed? */ + /* If there's a typechange involved, the old entry has been + * renamed, and so doesn't exist in the filesystem anymore. */ + if ((sts->entry_status & FS_REPLACED) != FS_REMOVED && + !unique_name_mine) + { + /* Find type. Small race condition - it might be removed now. */ + if (S_ISDIR(removed->st.mode)) { - /* Is the entry already removed? */ - /* If there's a typechange involved, the old entry has been - * renamed, and so doesn't exist in the filesystem anymore. */ - if ((sts->entry_status & FS_REPLACED) != FS_REMOVED && - !unique_name_mine) - { - /* Find type. Small race condition - it might be removed now. */ - /** \todo - remember what kind it was *before updating*, and - * only remove if still of same kind. - * make switch depend on old type? */ - STOPIF( hlp__lstat(fn, &st), NULL); - if (S_ISDIR(st.mode)) - { - STOPIF( up__rmdir(sts), NULL); - /* Free space used by children */ - for(j=0; jentry_count; j++) - STOPIF( ops__free_entry(sts->by_inode+j), NULL); - /* Now there're none */ - sts->entry_count=0; - sts->by_inode=sts->by_name=NULL; - } - else - STOPIF( up__unlink(sts, fn), NULL); - } - - /* Only removed, or added again? */ - if (sts->remote_status & FS_NEW) - DEBUGP("entry %s added again", fn); - else - { - /* We must do that here ... later the entry is no longer valid. */ - STOPIF( st__rm_status(sts), NULL); - STOPIF( ops__delete_entry(dir, NULL, i, UNKNOWN_INDEX), NULL); - i--; - goto loop_end; - } + STOPIF( up__rmdir(removed, sts->url), NULL); } + else + STOPIF( up__unlink(removed, fn), NULL); + } + *dir_change_flag|=REVERT_MTIME; + } - /* If we change something in this directory, we have to re-sort the - * entries by inode again. */ - dir->to_be_sorted=1; - current_url=sts->url; + /* If we change something in this directory, we have to re-sort the + * entries by inode again. */ + sts->parent->to_be_sorted=1; - if (S_ISDIR(sts->st.mode)) - { - /* Just try to create the directory, and ignore EEXIST. */ - status = (mkdir(fn, sts->st.mode & 07777) == -1) ? errno : 0; - DEBUGP("mkdir(%s) says %d", fn, status); - if (status == EEXIST) - { - /* Make sure it's a directory */ - STOPIF( hlp__lstat(fn, &st), NULL); - STOPIF_CODE_ERR( !S_ISDIR(st.mode), EEXIST, - "%s does exist as a non-directory entry!", fn); - /* \todo conflict? */ - } - else - STOPIF( status, "Cannot create directory %s", fn); + if ((sts->remote_status & FS_REPLACED) == FS_REMOVED) + { + sts->to_be_ignored=1; + goto ex; + } + current_url=sts->url; - /* Meta-data is done later. */ + if (S_ISDIR(sts->st.mode)) + { + *dir_change_flag|=REVERT_MTIME; + STOPIF( waa__mkdir_mask(fn, 1, sts->st.mode), NULL); + + /* Meta-data is done later. */ + + /* An empty directory need not be sorted; if we get entries, + * we'll mark it with \c to_be_sorted .*/ + } + else if (sts->remote_status & (FS_CHANGED | FS_REPLACED)) + /* Not a directory */ + { + STOPIF( rev__install_file(sts, 0, sts->decoder, pool), NULL); + *dir_change_flag|=REVERT_MTIME; - /* An empty directory need not be sorted; if we get entries, - * we'll mark it with \c to_be_sorted .*/ + /* We had a conflict; rename the file fetched from the + * repository to a unique name. */ + if (unique_name_mine) + { + *dir_change_flag|=SET_CURRENT; + + /* If that revision number overflows, we've got bigger problems. 
+ * */ + snprintf(revnum, sizeof(revnum)-1, + ".r%llu", (t_ull)sts->repos_rev); + revnum[sizeof(revnum)-1]=0; + + STOPIF( hlp__rename_to_unique(fn, revnum, + &unique_name_remote, pool), NULL); + + /* If we're updating and already have a conflict, we don't + * merge again. */ + if (sts->flags & RF_CONFLICT) + { + STOPIF_CODE_EPIPE( + printf("\"%s\" already marked as conflict.\n", fn), + NULL); + STOPIF( res__mark_conflict(sts, + unique_name_mine, unique_name_remote, NULL), NULL); + } + else if (opt__get_int(OPT__CONFLICT) == CONFLICT_BOTH) + { + STOPIF( res__mark_conflict(sts, + unique_name_mine, unique_name_remote, NULL), NULL); + + /* Create an empty file, + * a) to remind the user, and + * b) to avoid a "Deleted" status. */ + j=creat(fn, 0777); + if (j != -1) j=close(j); + + STOPIF_CODE_ERR(j == -1, errno, + "Error creating \"%s\"", fn); + + /* up__set_meta_data() does an lstat(), but we want the + * original values. */ + st=sts->st; + STOPIF( up__set_meta_data(sts, fn), NULL); + sts->st=st; } - else /* Not a directory */ + else if (opt__get_int(OPT__CONFLICT) == CONFLICT_MERGE) { - if (sts->remote_status & (FS_CHANGED | FS_REPLACED)) - { - STOPIF( rev__get_file(sts, sts->repos_rev, NULL, - NULL, NULL, subpool), - NULL); - - /* We had a conflict; rename the file fetched from the - * repository to a unique name. */ - if (unique_name_mine) - { - /* If that revision number overflows, we've got bigger problems. - * */ - snprintf(revnum, sizeof(revnum)-1, - ".r%llu", (t_ull)sts->repos_rev); - revnum[sizeof(revnum)-1]=0; - - STOPIF( hlp__rename_to_unique(fn, revnum, - &unique_name_remote, pool), NULL); - - /* If we're updating and already have a conflict, we don't - * merge again. */ - if (sts->flags & RF_CONFLICT) - { - printf("\"%s\" already marked as conflict.\n", fn); - STOPIF( res__mark_conflict(sts, - unique_name_mine, unique_name_remote, NULL), NULL); - } - else if (opt__get_int(OPT__CONFLICT) == CONFLICT_BOTH) - { - STOPIF( res__mark_conflict(sts, - unique_name_mine, unique_name_remote, NULL), NULL); - - /* Create an empty file, - * a) to remind the user, and - * b) to avoid a "Deleted" status. */ - j=creat(fn, 0777); - if (j != -1) j=close(j); - - STOPIF_CODE_ERR(j == -1, errno, - "Error creating \"%s\"", fn); - - /* up__set_meta_data() does an lstat(), but we want the - * original values. */ - st=sts->st; - STOPIF( up__set_meta_data(sts, fn), NULL); - sts->st=st; - } - else if (opt__get_int(OPT__CONFLICT) == CONFLICT_MERGE) - { - { - STOPIF( rev__get_file(sts, sts->old_rev, NULL, - NULL, NULL, subpool), - NULL); - - snprintf(revnum, sizeof(revnum)-1, - ".r%llu", (t_ull)sts->old_rev); - revnum[sizeof(revnum)-1]=0; - - STOPIF( hlp__rename_to_unique(fn, revnum, - &unique_name_common, pool), NULL); - - STOPIF( rev__merge(sts, - unique_name_mine, - unique_name_common, - unique_name_remote), NULL); - } - } - else - BUG("why a conflict?"); - } - } - else - { - /* If user-defined properties have changed, we have to fetch them - * from the repository, as we don't store them in RAM (due to the - * amount of memory possibly needed). */ - if (sts->remote_status & FS_PROPERTIES) - STOPIF( rev__get_props(sts, NULL, sts->repos_rev, subpool), NULL); - - if (sts->remote_status & FS_META_CHANGED) - { - /* If we removed the file, it has no meta-data any more; - * if we fetched it via rev__get_file(), it has it set already. - * Only the case of *only* meta-data-change is to be done. 
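up__set_meta_data(), called here for the meta-data-only case, replays the recorded owner, mode and timestamps; its chown()/chmod() part is visible further down in the update.c hunks. A minimal standalone sketch with the plain POSIX calls, assuming the caller already filtered out symlinks as the fsvs code does:

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <unistd.h>
#include <errno.h>

// Sketch: restore owner, mode and mtime of a non-symlink entry.
// EPERM from chown() is tolerated, mirroring the warning-only handling
// in up__set_meta_data().
static int restore_meta(const char *path, uid_t uid, gid_t gid,
        mode_t mode, struct timeval mtime)
{
        struct timeval tv[2];

        tv[0] = mtime;          // atime
        tv[1] = mtime;          // mtime

        if (chown(path, uid, gid) == -1 && errno != EPERM)
                return errno;
        if (chmod(path, mode & 07777) == -1)
                return errno;
        if (utimes(path, tv) == -1)
                return errno;
        return 0;
}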
*/ - STOPIF( up__set_meta_data(sts, fn), NULL); - } - } + STOPIF( rev__install_file(sts, sts->old_rev, + NULL, pool), NULL); + + snprintf(revnum, sizeof(revnum)-1, + ".r%llu", (t_ull)sts->old_rev); + revnum[sizeof(revnum)-1]=0; + + STOPIF( hlp__rename_to_unique(fn, revnum, + &unique_name_common, pool), NULL); + + STOPIF( rev__merge(sts, + unique_name_mine, + unique_name_common, + unique_name_remote), NULL); } + else + BUG("why a conflict?"); + } + } + else + { + /* If user-defined properties have changed, we have to fetch them + * from the repository, as we don't store them in RAM (due to the + * amount of memory possibly needed). */ + if (sts->remote_status & FS_PROPERTIES) + STOPIF( rev__get_props(sts, NULL, sts->repos_rev, pool), NULL); + + if (sts->remote_status & FS_META_CHANGED) + { + /* If we removed the file, it has no meta-data any more; + * if we fetched it via rev__get_file(), it has it set already. + * Only the case of *only* meta-data-change is to be done. */ + STOPIF( up__set_meta_data(sts, fn), NULL); } + } + + +ex: + return status; +} +/** -. + * Used on update. */ +int rev__do_changed(struct estat *dir, + apr_pool_t *pool) +{ + int status; + int i; + struct estat *sts; + apr_pool_t *subpool; + enum rev___dir_change_flag_e dir_flag; + + + status=0; + subpool=NULL; + dir_flag= (dir->entry_status & FS_NEW) || + (dir->remote_status & FS_NEW) ? REVERT_MTIME : NOT_CHANGED; + + /* If some children have changed, do a full run. + * Else just repair meta-data. */ + if (!(dir->remote_status & FS_CHILD_CHANGED)) + DEBUGP("%s: no children changed", dir->name); + else for(i=0; ientry_count; i++) + { + sts=dir->by_inode[i]; + + STOPIF( apr_pool_create(&subpool, pool), "Cannot get a subpool"); + + if (sts->remote_status & FS__CHANGE_MASK) + STOPIF( rev___undo_change(sts, &dir_flag, subpool), NULL); + /* We always recurse now, even if the directory has no children. * Else we'd have to check for children in a few places above, which would * make the code unreadable. */ - if (S_ISDIR(sts->st.mode) - /* && - (sts->remote_status & FS_CHILD_CHANGED) */ - ) + if (S_ISDIR(sts->st.mode) && + (sts->remote_status & FS_REPLACED) != FS_REMOVED) { apr_pool_destroy(subpool); subpool=NULL; STOPIF( apr_pool_create(&subpool, pool), "subpool creation"); - STOPIF( rev__do_changed(session, sts, subpool), NULL); + STOPIF( rev__do_changed(sts, subpool), NULL); } - /* After the recursive call the path string may not be valid anymore. - * */ STOPIF( st__rm_status(sts), NULL); -loop_end: apr_pool_destroy(subpool); subpool=NULL; } + /* We cannot free the memory earlier - the data is needed for the status + * output and recursion. */ STOPIF( ops__free_marked(dir, 0), NULL); /* The root entry would not be printed; do that here. */ @@ -1091,17 +1378,12 @@ if (dir->entry_status & FS__CHANGE_MASK) dir->flags |= RF_CHECK; - /* Now, after all has been said and done for the children, set and re-get - * the actual meta-data - the mtime has been changed in the meantime - * (because of child node creation), and maybe this filesystem's - * granularity is worse than on commit; then the timestamps would be - * wrong. */ - /* TODO: should the newer of both timestamps be taken (or current time), - * if the directory has changed against the directory version? 
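The REVERT_MTIME flag collected above exists because creating or removing children bumps the parent directory's mtime, which would make the directory look modified afterwards. The save-and-restore idea behind rev___handle_dir_mtime(), shown as a standalone sketch (not the actual fsvs function):

#include <sys/stat.h>
#include <sys/time.h>
#include <fcntl.h>
#include <unistd.h>

// Sketch: remember the directory's times, create a child, then put the
// old times back so the directory does not register as changed.
static int create_child_keep_mtime(const char *dir, const char *child)
{
        struct stat st;
        struct timeval tv[2];
        int fd;

        if (lstat(dir, &st) == -1)
                return -1;

        fd = open(child, O_CREAT | O_EXCL | O_WRONLY, 0644);
        if (fd == -1)
                return -1;
        close(fd);

        tv[0].tv_sec = st.st_atime;  tv[0].tv_usec = 0;
        tv[1].tv_sec = st.st_mtime;  tv[1].tv_usec = 0;
        return utimes(dir, tv);
}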
*/ - STOPIF( up__set_meta_data(dir, NULL), NULL); - STOPIF( ops__update_single_entry(dir, NULL), NULL); + STOPIF( rev___handle_dir_mtime(dir, dir_flag), NULL); + ex: return status; } + + diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/revert.h /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/revert.h --- fsvs-1.1.14/src/revert.h 2008-02-19 05:57:01.000000000 +0000 +++ fsvs-1.1.17/src/revert.h 2008-10-10 15:55:55.000000000 +0100 @@ -17,21 +17,17 @@ /** \ref revert main action function. */ work_t rev__work; -/** \a Revert callback function. */ -action_t rev__action; +/** Has to fetch the decoder from the repository. */ +#define DECODER_UNKNOWN ((char*)-1) -/** Fetches a given entry from the repository. */ -int rev__get_file(struct estat *sts, - svn_revnum_t revision, - char *url_to_use, - svn_revnum_t *fetched, - char **only_tmp, +/** Gets a clean copy from the repository. */ +int rev__install_file(struct estat *sts, svn_revnum_t revision, + char *decoder, apr_pool_t *pool); /** Go through the tree, and fetch all changed entries (estimated * per \c remote_status). */ -int rev__do_changed(svn_ra_session_t *session, - struct estat *dir, +int rev__do_changed(struct estat *dir, apr_pool_t *pool); /** Gets and writes the properties of the given \a sts into its \ref prop @@ -41,6 +37,31 @@ svn_revnum_t revision, apr_pool_t *pool); +/** Gets the entry into a temporary file. */ +int rev__get_text_to_tmpfile(char *loc_url, svn_revnum_t revision, + char *encoder, + char *filename_base, char **filename, + struct estat *sts_for_manber, + struct estat *output_sts, apr_hash_t **props, + apr_pool_t *pool); + +/** Just a wrapper for rev__get_text_to_stream(). */ +int rev__get_text_into_buffer(char *loc_url, svn_revnum_t revision, + const char *decoder, + svn_stringbuf_t **output, + struct estat *sts_for_manber, + struct estat *output_sts, + apr_hash_t **props, + apr_pool_t *pool); + +/** General function to get a file into a stream. */ +int rev__get_text_to_stream( char *loc_url, svn_revnum_t revision, + const char *decoder, + svn_stream_t *output, + struct estat *sts_for_manber, + struct estat *output_sts, + apr_hash_t **props, + apr_pool_t *pool); #endif diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/status.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/status.c --- fsvs-1.1.14/src/status.c 2008-04-02 06:25:13.000000000 +0100 +++ fsvs-1.1.17/src/status.c 2008-10-10 15:57:39.000000000 +0100 @@ -65,12 +65,14 @@ * entry makes no sense. * - A changed type (character device to symlink, file to directory etc.) * is given as \c 'R' (replaced), ie. as removed and newly added. - * - If the entry has been modified, the change is shown as \c 'C'. \n + * - \anchor status_possibly + * If the entry has been modified, the change is shown as \c 'C'. \n * If the modification or status change timestamps (mtime, ctime) are * changed, but the size is still the same, the entry is marked as - * possibly changed (a question mark \c '?' is printed). See \ref - * opt_checksum. - * - The meta-data flag \c 'm' shows meta-data changes like properties, + * possibly changed (a question mark \c '?' is printed) - but see \ref + * o_chcheck "change detection" for details. 
+ * - \anchor status_meta_changed + * The meta-data flag \c 'm' shows meta-data changes like properties, * modification timestamp and/or the rights (owner, group, mode); * depending on the \ref glob_opt_verb "-v/-q" command line parameters, * it may be splitted into \c 'P' (properties), \c 't' (time) and \c 'p' @@ -108,18 +110,18 @@ { static char buffer[20]; - switch (sts->entry_type) + switch ( (sts->updated_mode ? sts->updated_mode : sts->st.mode) & S_IFMT) { - case FT_CDEV: - case FT_BDEV: + case S_IFBLK: + case S_IFCHR: return "dev"; - case FT_DIR: + case S_IFDIR: return "dir"; default: /* When in doubt, believe it's a normal file. * We have that case for sync-repos - could be fixed some time. */ - case FT_FILE: - case FT_SYMLINK: + case S_IFREG: + case S_IFLNK: sprintf(buffer, "%llu", (t_ull) sts->st.size); break; } @@ -181,9 +183,10 @@ if (opt_verbose <0) goto ex; - /* If the entry is new, got added or will be unversioned, we know that - * all meta-data has changed; we show only the essential information. */ - if ((status_bits & FS_NEW) || + /* If the entry is new or deleted, got added or will be unversioned, we + * know that all meta-data has changed; we show only the essential + * information. */ + if ((status_bits & (FS_NEW | FS_REMOVED)) || (flags & (RF_ADD | RF_UNVERSION))) status_bits &= ~(FS_META_CHANGED | FS_LIKELY | FS_CHANGED); @@ -216,17 +219,17 @@ STOPIF( hlp__format_path(sts, path, &path), NULL); - status= 0> - printf("%s%c%s%c%c %8s %s%s%s%s%s\n", - opt__get_int(OPT__STATUS_COLOR) ? st___color(status_bits) : "", - - flags & RF_ADD ? 'n' : - flags & RF_UNVERSION ? 'd' : - (status_bits & FS_REPLACED) == FS_REPLACED ? 'R' : - status_bits & FS_NEW ? 'N' : - status_bits & FS_REMOVED ? 'D' : '.', + STOPIF_CODE_ERR( + printf("%s%c%s%c%c %8s %s%s%s%s%s\n", + opt__get_int(OPT__STATUS_COLOR) ? st___color(status_bits) : "", + + flags & RF_ADD ? 'n' : + flags & RF_UNVERSION ? 'd' : + (status_bits & FS_REPLACED) == FS_REPLACED ? 'R' : + status_bits & FS_NEW ? 'N' : + status_bits & FS_REMOVED ? 'D' : '.', - st___meta_string(status_bits, flags), + st___meta_string(status_bits, flags), flags & RF_CONFLICT ? 'x' : status_bits & FS_CHANGED ? 'C' : '.', @@ -238,17 +241,16 @@ ( ( status_bits & FS_REMOVED ) && ( flags & (RF_UNVERSION | RF_ADD) ) ) ? '!' : '.', - size, path, - opt__get_int(OPT__STATUS_COLOR) ? ANSI__NORMAL : "", - + size, path, + opt__get_int(OPT__STATUS_COLOR) ? ANSI__NORMAL : "", + /* Here the comparison of opt_verbose is already included in the * check on copyfrom above. */ copyfrom ? " (copied from " : "", copyfrom ? copyfrom : copy_inherited ? " (inherited)" : "", - copyfrom ? ")" : ""); - /* possibly a EPIPE */ - STOPIF_CODE_ERR(status, errno, "Broken Pipe"); + copyfrom ? ")" : "") == -1, + errno, "Error printing output"); } @@ -261,21 +263,17 @@ * */ int st__status(struct estat *sts) { - int status; + int status; int e_stat, flags; char *path; - status=0; - STOPIF( ops__build_path(&path, sts), NULL); + status=0; + STOPIF( ops__build_path(&path, sts), NULL); - /* Is this entry already done? */ - if (sts->was_output) - { - DEBUGP("%s was already output ...", path); - goto ex; - } - sts->was_output=1; + /* Is this entry already done? */ + BUG_ON(sts->was_output, "%s was already output ...", path); + sts->was_output=1; e_stat=sts->entry_status; @@ -302,6 +300,24 @@ /** -. * */ +int st__action(struct estat *sts) +{ + int status; + + if (opt__get_int(OPT__STOP_ON_CHANGE) && + sts->entry_status) + /* Status is a read-only operation, so that works. 
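As the status.c hunks above show, 1.1.17 drops the private FT_* entry-type enum and keys type decisions off the standard S_IFMT bits of st.mode (or updated_mode). A standalone illustration of that dispatch, using only the portable macros:

#include <sys/stat.h>

// Sketch: classify an entry purely by its mode bits, as the reworked
// status code does.
static const char *classify(mode_t mode)
{
        if (S_ISDIR(mode))  return "dir";
        if (S_ISREG(mode))  return "file";
        if (S_ISLNK(mode))  return "symlink";
        if (S_ISBLK(mode))  return "block-dev";
        if (S_ISCHR(mode))  return "char-dev";
        return "special/unknown";
}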
*/ + exit(1); + + STOPIF( st__status(sts), NULL); + +ex: + return status; +} + + +/** -. + * */ int st__rm_status(struct estat *sts) { int status; @@ -341,9 +357,10 @@ /* Maybe no URL have been defined yet */ if (status != ENOENT) STOPIF(status, NULL); - STOPIF(ign__load_list(NULL), NULL); + STOPIF( ign__load_list(NULL), NULL); - if (opt__get_int(OPT__DIR_SORT)) + if (opt__get_int(OPT__DIR_SORT) && + !opt__get_int(OPT__STOP_ON_CHANGE)) { action->local_callback=st__progress; action->local_uninit=st__progress_uninit; @@ -354,7 +371,10 @@ if (opt__get_int(OPT__DIR_SORT)) - STOPIF( waa__do_sorted_tree(root, st__status), NULL); + { + action->local_callback=st__status; + STOPIF( waa__do_sorted_tree(root, ac__dispatch), NULL); + } ex: return status; @@ -519,18 +539,26 @@ #define BIT_INFO(v, s) { .val=v, .string=s, .str_len=strlen(s) } -/** Constructs a string from a bitmask, where one or more bits may be set. */ +/** Constructs a string from a bitmask, where one or more bits may be set. + * + * Must not be free()d. */ #define st___string_from_bits(v, a, t) _st___string_from_bits(v, a, sizeof(a)/sizeof(a[0]), t) volatile char *_st___string_from_bits(int value, const struct st___bit_info data[], int max, char *text_for_none) { + int status; + static struct cache_t *cache=NULL; static const char sep[]=", "; - static char *string=NULL; - static int len=0; + char *string; int i; int last_len, new_len; + struct cache_entry_t **cc; + + STOPIF( cch__new_cache(&cache, 4), NULL); + STOPIF( cch__add(cache, 0, NULL, 128, &string), NULL); + cc=cache->entries + cache->lru; last_len=0; if (string) *string=0; @@ -541,13 +569,9 @@ new_len = last_len + data[i].str_len + (last_len ? strlen(sep) : 0); - while (new_len + 8 > len) + if (new_len + 8 > (*cc)->len) { - if (!len) len=256; - len *= 2; - string=realloc(string, len); - /* Cannot use STOPIF_ENOMEM() - we want to return a char* */ - if (!string) return NULL; + STOPIF( cch__entry_set(cc, 0, NULL, new_len+64, 1, &string), NULL); string[last_len]=0; } @@ -568,6 +592,9 @@ } } +ex: + /* Is that good? */ + if (status) return NULL; /* If no bits are set, return "empty" */ return string && *string ? 
string : text_for_none; } @@ -610,25 +637,30 @@ } +char *st__type_string(mode_t mode) +{ + switch (mode & S_IFMT) + { + case S_IFDIR: return "directory"; + case S_IFBLK: return "block-dev"; + case S_IFCHR: return "char-dev"; + case S_IFREG: return "file"; + case S_IFLNK: return "symlink"; + } + + return "invalid"; +} + + inline volatile char* st__status_string(const struct estat * const sts) { return st__status_string_fromint(sts->entry_status); } -int st__print_entry_info(struct estat *sts, int with_type) +int st__print_entry_info(struct estat *sts) { int status; - const struct st___bit_info types[]={ - BIT_INFO( FT_IGNORE, "ignored"), - BIT_INFO( FT_CDEV, "char-dev"), - BIT_INFO( FT_BDEV, "block-dev"), - BIT_INFO( FT_DIR, "directory"), - BIT_INFO( FT_SYMLINK, "symlink"), - BIT_INFO( FT_FILE, "file"), - BIT_INFO( FT_UNKNOWN, "unknown"), - }; - char *path, *waa_path, *url, *copyfrom; svn_revnum_t copy_rev; @@ -643,73 +675,77 @@ STOPIF( cm__get_source(sts, path, ©from, ©_rev, 0), NULL); } - if (with_type) - status=printf("\tType:\t\t%s\n", - st___string_from_bits(sts->entry_type, types, "invalid") ); - if (sts->entry_type == FT_DIR) - status |= printf("\tChildCount:\t%u\n", sts->entry_count); - status |= printf("\tURL:\t\t%s\n", url); - status |= printf("\tStatus:\t\t0x%X (%s)\n", sts->entry_status, - st__status_string(sts)); - status |= printf("\tFlags:\t\t0x%X (%s)\n", - sts->flags & ~RF_PRINT, - st__flags_string_fromint(sts->flags)); + STOPIF_CODE_EPIPE( printf("\tType:\t\t%s\n", + st__type_string(sts->st.mode)), NULL); + if (S_ISDIR(sts->st.mode)) + STOPIF_CODE_EPIPE( printf( "\tChildCount:\t%u\n", + sts->entry_count), NULL); + STOPIF_CODE_EPIPE( printf("\tURL:\t\t%s\n", url), NULL); + STOPIF_CODE_EPIPE( printf("\tStatus:\t\t0x%X (%s)\n", + sts->entry_status, st__status_string(sts)), NULL); + STOPIF_CODE_EPIPE( printf("\tFlags:\t\t0x%X (%s)\n", + sts->flags & ~RF_PRINT, + st__flags_string_fromint(sts->flags)), NULL); if (opt_verbose && copyfrom) { - status |= printf("\tCopyfrom:\trev. %llu of %s\n", - (t_ull)copy_rev, copyfrom); + STOPIF_CODE_EPIPE( printf("\tCopyfrom:\trev. 
%llu of %s\n", + (t_ull)copy_rev, copyfrom), NULL); } - status |= printf("\tDev:\t\t%llu\n", (t_ull)sts->st.dev); - status |= printf("\tInode:\t\t%llu\n", (t_ull)sts->st.ino); - status |= printf("\tMode:\t\t0%4o\n", sts->st.mode); - status |= printf("\tUID/GID:\t%u (%s)/%u (%s)\n", - sts->st.uid, hlp__get_uname(sts->st.uid, "undefined"), - sts->st.gid, hlp__get_grname(sts->st.gid, "undefined") ); + STOPIF_CODE_EPIPE( printf("\tDev:\t\t%llu\n", + (t_ull)sts->st.dev), NULL); + STOPIF_CODE_EPIPE( printf("\tInode:\t\t%llu\n", + (t_ull)sts->st.ino), NULL); + STOPIF_CODE_EPIPE( printf("\tMode:\t\t0%4o\n", + sts->st.mode), NULL); + STOPIF_CODE_EPIPE( printf("\tUID/GID:\t%u (%s)/%u (%s)\n", + sts->st.uid, hlp__get_uname(sts->st.uid, "undefined"), + sts->st.gid, hlp__get_grname(sts->st.gid, "undefined") ), NULL); /* Remove the \n at the end */ - status |= printf("\tMTime:\t\t%.24s\n", ctime( &(sts->st.mtim.tv_sec) )); - status |= printf("\tCTime:\t\t%.24s\n", ctime( &(sts->st.ctim.tv_sec) )); + STOPIF_CODE_EPIPE( printf("\tMTime:\t\t%.24s\n", + ctime( &(sts->st.mtim.tv_sec) )), NULL); + STOPIF_CODE_EPIPE( printf("\tCTime:\t\t%.24s\n", + ctime( &(sts->st.ctim.tv_sec) )), NULL); STOPIF( waa__get_waa_directory(path, &waa_path, NULL, NULL, GWD_WAA), NULL); - status |= printf("\tWAA-Path:\t%s\n", waa_path); + STOPIF_CODE_EPIPE( printf("\tWAA-Path:\t%s\n", + waa_path), NULL); if (!sts->parent) { STOPIF( waa__get_waa_directory(path, &waa_path, NULL, NULL, GWD_CONF), NULL); - status |= printf("\tConf-Path:\t%s\n", waa_path); + STOPIF_CODE_EPIPE( printf("\tConf-Path:\t%s\n", + waa_path), NULL); } - status |= printf("\tRevision:\t%li\n", sts->repos_rev); - - if (sts->entry_type==FT_FILE) - status |= printf("\tRepos-MD5:\t%s\n", - cs__md52hex(sts->md5)); + /* The root entry has no URL associated, and so no revision number. + * Print the current revision of the highest priority URL. */ + STOPIF_CODE_EPIPE( printf("\tRevision:\t%li\n", + sts->parent ? sts->repos_rev : urllist[0]->current_rev), NULL); + + if (S_ISREG(sts->st.mode)) + STOPIF_CODE_EPIPE( printf("\tRepos-MD5:\t%s\n", + cs__md52hex(sts->md5)), NULL); - if (sts->entry_type==FT_BDEV || sts->entry_type==FT_CDEV) + if (S_ISBLK(sts->st.mode) || S_ISCHR(sts->st.mode)) + { #ifdef DEVICE_NODES_DISABLED DEVICE_NODES_DISABLED(); #else - status |= printf("\tDevice number:\t%llu:%llu\n", - (t_ull)MAJOR(sts->st.rdev), - (t_ull)MINOR(sts->st.rdev)); + STOPIF_CODE_EPIPE( printf("\tDevice number:\t%llu:%llu\n", + (t_ull)MAJOR(sts->st.rdev), + (t_ull)MINOR(sts->st.rdev)), NULL); + } #endif else - status |= printf("\tSize:\t\t%llu\n", (t_ull)sts->st.size); + STOPIF_CODE_EPIPE( printf("\tSize:\t\t%llu\n", + (t_ull)sts->st.size), NULL); /* Any last words? */ - status |= printf("\n"); - - DEBUGP("status at end of info is 0x%X", status); - - /* Had a printf a negative return value? */ - if (status < 0) - STOPIF_CODE_ERR( 1, errno ? errno : ENOSPC, - "Output error on printing entry info"); - else - status=0; + STOPIF_CODE_EPIPE( printf("\n"), NULL); ex: return status; diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/status.h /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/status.h --- fsvs-1.1.14/src/status.h 2008-02-19 05:57:01.000000000 +0000 +++ fsvs-1.1.17/src/status.h 2008-10-02 18:41:42.000000000 +0100 @@ -16,6 +16,8 @@ /** A function to show the local status of an entry. */ action_t st__status; +/** Status action. */ +action_t st__action; /** A function to show the remote status of an entry. */ action_t st__rm_status; /** The \ref status worker function. 
*/ @@ -28,7 +30,7 @@ action_uninit_t st__progress_uninit; /** Shows detailed information about the entry. */ -int st__print_entry_info(struct estat *sts, int with_type); +int st__print_entry_info(struct estat *sts); /** Returns a string describing the \a entry_status bits of struct \a * estat. */ @@ -37,6 +39,8 @@ volatile char* st__status_string_fromint(int mask); /** Return the string interpretation of the flags like \ref RF_CHECK. */ volatile char* st__flags_string_fromint(int mask); +/** Return the type string - cdev, bdev, whatever. */ +char *st__type_string(mode_t mode); #endif diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/sync.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/sync.c --- fsvs-1.1.14/src/sync.c 2008-03-25 06:20:11.000000000 +0000 +++ fsvs-1.1.17/src/sync.c 2008-10-02 18:41:42.000000000 +0100 @@ -29,7 +29,7 @@ * * This is normally not needed; the use cases are * - debugging and - * - recovering from data loss in \c $FSVS_WAA (\c /var/spool/fsvs ). + * - recovering from data loss in \ref o_waa "$FSVS_WAA". * * It is (currently) important if you want to backup two similar * machines. Then you can commit one machine into a subdirectory of your @@ -82,6 +82,9 @@ #include "status.h" #include "checksum.h" #include "est_ops.h" +#include "cache.h" +#include "revert.h" +#include "props.h" #include "commit.h" #include "waa.h" #include "url.h" @@ -91,6 +94,146 @@ #include "helper.h" +/** Get entries of directory, and fill tree. + * + * Most of the data should already be here; we just + * fill the length of the entries in. + * */ +int sync___recurse(struct estat *cur_dir, + apr_pool_t *pool) +{ + int status; + svn_error_t *status_svn; + apr_pool_t *subpool, *subsubpool; + apr_hash_t *dirents; + char *path; + const char *name; + const void *key; + void *kval; + apr_hash_index_t *hi; + svn_dirent_t *val; + char *url; + struct svn_string_t *decoder; + struct estat *sts; + svn_stringbuf_t *entry_text; + + + status=0; + subpool=subsubpool=NULL; + + /* get a fresh pool */ + STOPIF( apr_pool_create_ex(&subpool, pool, NULL, NULL), + "no pool"); + + STOPIF( ops__build_path( &path, cur_dir), NULL); + DEBUGP("list of %s", path); + + STOPIF_SVNERR( svn_ra_get_dir2, + (current_url->session, + &dirents, NULL, NULL, + /* Use "" for the root, and cut the "./" for everything else. */ + (cur_dir->parent) ? path + 2 : "", + current_url->current_rev, + SVN_DIRENT_HAS_PROPS | SVN_DIRENT_HAS_PROPS | + SVN_DIRENT_KIND | SVN_DIRENT_SIZE, + subpool)); + + for( hi=apr_hash_first(subpool, dirents); hi; hi = apr_hash_next(hi)) + { + apr_hash_this(hi, &key, NULL, &kval); + name=key; + val=kval; + + + STOPIF( cb__add_entry(cur_dir, name, NULL, + NULL, 0, 0, NULL, 0, (void**)&sts), NULL); + + if (url__current_has_precedence(sts->url) && + !S_ISDIR(sts->st.mode)) + { + /* File or special entry. */ + sts->st.size=val->size; + + DEBUGP("%s has mode %o (%s)", sts->name, sts->st.mode, + st__type_string(sts->st.mode)); + + decoder= sts->user_prop ? + apr_hash_get(sts->user_prop, + propval_updatepipe, APR_HASH_KEY_STRING) : + NULL; + + if (S_ISREG(sts->st.mode) && !decoder) + { + /* Entry finished. */ + } + else if (S_ISREG(sts->st.mode) && val->size > 8192) + { + /* Make this size configurable? Remove altogether? After all, the + * processing time needs not be correlated to the encoded size. */ + DEBUGP("file encoded, but too big for fetching (%llu)", + (t_ull)val->size); + } + else + { + /* Now we're left with special devices and small, encoded files. 
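sync___recurse() above gets the listing as an APR hash mapping entry name to svn_dirent_t*. A minimal standalone sketch of walking such a hash, using only the public APR and Subversion types (not fsvs internals):

#include <stdio.h>
#include <apr_hash.h>
#include <svn_types.h>

// Sketch: iterate the name -> svn_dirent_t* hash filled in by
// svn_ra_get_dir2(), looking at kind and size of each entry.
static void list_dirents(apr_hash_t *dirents, apr_pool_t *pool)
{
        apr_hash_index_t *hi;
        const void *key;
        void *val;
        const char *name;
        svn_dirent_t *ent;

        for (hi = apr_hash_first(pool, dirents); hi; hi = apr_hash_next(hi))
        {
                apr_hash_this(hi, &key, NULL, &val);
                name = key;
                ent = val;

                if (ent->kind == svn_node_dir)
                        printf("%s/ (directory)\n", name);
                else
                        printf("%s (%llu bytes)\n", name,
                                        (unsigned long long)ent->size);
        }
}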
*/ + STOPIF( url__full_url(sts, NULL, &url), NULL); + + /* get a fresh pool */ + STOPIF( apr_pool_create_ex(&subsubpool, subpool, NULL, NULL), + "no pool"); + + /* That's the third time we access this file ... + * svn_ra needs some more flags for the directory listing functions. */ + STOPIF( rev__get_text_into_buffer(url, sts->repos_rev, + decoder ? decoder->data : NULL, + &entry_text, NULL, sts, NULL, subsubpool), NULL); + + sts->st.size=entry_text->len; + DEBUGP("parsing %s as %llu: %s", url, + (t_ull)sts->st.size, entry_text->data); + + /* If the entry exists locally, we might have a more detailed value + * than FT_ANYSPECIAL. */ + if (!S_ISREG(sts->st.mode)) + /* We don't need the link destination; we already got the MD5. */ + STOPIF( ops__string_to_dev(sts, entry_text->data, NULL), NULL); + + /* For devices there's no length to compare; the rdev field + * shares the space. + * And for normal files the size is already correct. */ + if (S_ISLNK(sts->st.mode)) + sts->st.size-=strlen(link_spec); + + if (subsubpool) apr_pool_destroy(subsubpool); + } + + /* After this entry is done we can return a bit of memory. */ + if (sts->user_prop) + { + apr_pool_destroy(apr_hash_get(sts->user_prop, "", 0)); + sts->user_prop=NULL; + } + } + + /* We have to loop even through obstructed directories - some + * child may not be overlayed. */ + if (val->kind == svn_node_dir) + { + STOPIF( sync___recurse( sts, subpool), NULL); + } + + } + +ex: + if (subpool) apr_pool_destroy(subpool); + + return status; +} + + +/** Repository callback. + * + * Here we get most data - all properties and the tree structure. */ int sync__progress(struct estat *sts) { int status; @@ -101,6 +244,10 @@ status=0; STOPIF( ops__build_path(&path, sts), NULL); + STOPIF( waa__delete_byext( path, WAA__FILE_MD5s_EXT, 1), NULL); + STOPIF( waa__delete_byext( path, WAA__PROP_EXT, 1), NULL); + + STOPIF( st__rm_status(sts), NULL); /* If the entry is a special node (symlink or device), we have @@ -133,10 +280,9 @@ * */ if ( hlp__lstat(path, &st) == 0 ) { - if (sts->entry_type == FT_ANYSPECIAL) + if ((sts->st.mode & S_IFMT) == 0) { sts->st=st; - sts->entry_type=ops___filetype(& sts->st); } /* We fetch the dev/inode to get a correct sorting. @@ -168,9 +314,9 @@ } else { - if (sts->entry_type == FT_ANYSPECIAL) + if (S_ISANYSPECIAL(sts->st.mode)) { - sts->entry_type=FT_FILE; + /* We don't know what it really is. BUG? */ sts->st.mode= (sts->st.mode & ~S_IFMT) | S_IFREG; } } @@ -184,7 +330,7 @@ return status; } - + /** -. * * Could possibly be folded into the new update. */ @@ -193,7 +339,8 @@ int status; svn_error_t *status_svn; svn_revnum_t rev; - int i; + char *strings; + int string_space; status=0; @@ -204,59 +351,37 @@ /* We cannot easily format the paths for arguments ... first, we don't * have any (normally) */ - for(i=0; iurl); + printf("sync-repos for %s rev\t%llu.\n", + current_url->url, (t_ull)rev); + /* We have nothing ... */ + current_url->current_rev=0; + STOPIF( cb__record_changes(root, rev, global_pool), NULL); - if (opt_target_revisions_given) - rev=opt_target_revision; - else - rev=current_url->target_rev; + /* set new revision */ + current_url->current_rev=rev; + STOPIF( ci__set_revision(root, rev), NULL); - /* Giving a simple SVN_INVALID_REVNUM to ->set_path() doesn't work - we - * get an error "Bogus revision report". Get the real HEAD. 
*/ - /* \todo: get latest for each url, let user specify rev for each url */ - if (rev == SVN_INVALID_REVNUM) - { - STOPIF_SVNERR( svn_ra_get_latest_revnum, - (session, &rev, global_pool)); - DEBUGP("HEAD is at %ld", rev); - } - else - rev=opt_target_revision; + STOPIF( sync___recurse(root, current_url->pool), NULL); + } + STOPIF_CODE_ERR( status != EOF, status, NULL); - /* Say we don't have any data. */ - current_url->current_rev=0; + /* Take the correct values for the root. */ + STOPIF( hlp__lstat( ".", &root->st), NULL); + root->flags |= RF_CHECK; - STOPIF( cb__record_changes(session, root, rev, global_pool), NULL); - printf("%s for %s rev\t%llu.\n", - action->name[0], current_url->url, - (t_ull)rev); - if (!action->is_compare) - { - /* set new revision */ - DEBUGP("setting revision to %llu", (t_ull)rev); - STOPIF( ci__set_revision(root, rev), NULL); - } - } - if (action->is_compare) - { - /* This is for remote-status. Just nothing to be done. */ - } - else - { - /* See the comment at the end of commit.c - atomicity for writing - * these files. */ - STOPIF( waa__output_tree(root), NULL); - STOPIF( url__output_list(), NULL); - /* The copyfrom database is no longer valid. */ - STOPIF( waa__delete_byext(wc_path, WAA__COPYFROM_EXT, 1), NULL); - } + /* See the comment at the end of commit.c - atomicity for writing + * these files. */ + STOPIF( waa__output_tree(root), NULL); + /* The current revisions might have changed. */ + STOPIF( url__output_list(), NULL); + /* The copyfrom database is no longer valid. */ + STOPIF( waa__delete_byext(wc_path, WAA__COPYFROM_EXT, 1), NULL); ex: diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/tools/man-repair.pl /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/tools/man-repair.pl --- fsvs-1.1.14/src/tools/man-repair.pl 1970-01-01 01:00:00.000000000 +0100 +++ fsvs-1.1.17/src/tools/man-repair.pl 2008-10-25 12:12:03.000000000 +0100 @@ -0,0 +1,50 @@ +#!/usr/bin/perl +# +# This has gone much too far. +# Doxygen just writes garbage as man page. +# But what else should I use? +# + +$output=shift || die "output?"; +$new_title=shift; + +($section) = ($output =~ m/(\d)$/); +open(STDOUT, "> $output") || die "write $output: $!"; + +$done=0; +$had_title=0; +while () +{ + # Change title and section. + $done=s{^ + (\.TH \s+) + "([^"]+)" + \s+ \d \s+ + }{ + $1 . + '"' . ($new_title || $2) . '"' . + " $section " + }ex unless $done; + + # Title again - it's merged with the first headline. + s/^(.*\S)\s*- (\.SH ".*")/($new_title || $1) . "\n" . $2/e; + + # Doxygen generates wrong lines before headlines. + if ($_ eq "\\fC \\fP\n") + { + $x=; + # Only print this string if next line is no header. + print $_ if $x !~ m/^\.S[HS]/; + + $_=$x; + } + + # \fC may not have a ' directly afterwards. + s#^\\fC'#\\fC '#; + + print($_) || die $!; +} + +close(STDOUT) || die $!; + +exit !$done; diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/update.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/update.c --- fsvs-1.1.14/src/update.c 2008-03-25 06:20:11.000000000 +0000 +++ fsvs-1.1.17/src/update.c 2008-10-02 19:42:42.000000000 +0100 @@ -22,7 +22,7 @@ * \section update * * \code - * ## invalid ## fsvs update [-r rev] [working copy base] + * fsvs update [-r rev] [working copy base] * fsvs update [-u url@rev ...] [working copy base] * \endcode * @@ -58,6 +58,7 @@ #include "helper.h" #include "url.h" #include "status.h" +#include "racallback.h" #include "props.h" #include "checksum.h" #include "revert.h" @@ -74,7 +75,6 @@ static unsigned tmp_len=0; - /** Prefetch update-pipe property. 
* In case we're updating an existing file, we won't get \b all properties * sent - only changed. So we have to look for existing properties if we @@ -154,14 +154,16 @@ status=0; if (!utf8_value) { - DEBUGP("got NULL property for %s: %s", - sts->name, loc_name); - goto ex; + DEBUGP("got NULL property for %s: %s", sts->name, loc_name); + //goto ex; + loc_value=NULL; + } + else + { + STOPIF( hlp__utf82local(utf8_value->data, &loc_value, -1), NULL); + DEBUGP("got property for %s: %s=%s", + sts->name, loc_name, loc_value); } - - STOPIF( hlp__utf82local(utf8_value->data, &loc_value, -1), NULL); - DEBUGP("got property for %s: %s=%s", - sts->name, loc_name, loc_value); /* if an invalid utf8_value is detected, we'd better ignore it. @@ -286,38 +288,50 @@ else if (0 == strcmp(utf8_name, propname_special) && 0 == strcmp(utf8_value->data, propval_special)) { - if (sts->entry_type & FT_ANYSPECIAL) + if (S_ISANYSPECIAL(sts->st.mode)) { DEBUGP("already marked as special"); } else { - sts->entry_type = FT_ANYSPECIAL; + /* Remove any S_IFDIR and similar bits. */ + if (! (S_ISLNK(sts->updated_mode) || + S_ISCHR(sts->updated_mode) || + S_ISBLK(sts->updated_mode)) ) + sts->updated_mode = sts->st.mode = + (sts->st.mode & 07777) | S_IFANYSPECIAL; DEBUGP("this is a special node"); } } else if (0 == strcmp(utf8_name, propname_origmd5)) { - BUG_ON(sts->entry_type != FT_FILE); + /* Depending on the order of the properties we might not know whether + * this is a special node or a regular file; so we only disallow that + * for directories. */ + BUG_ON(S_ISDIR(sts->updated_mode)); STOPIF( cs__char2md5( utf8_value->data, sts->md5), NULL); DEBUGP("got a orig-md5: %s", cs__md52hex(sts->md5)); sts->has_orig_md5=1; } else { - if (action->needs_decoder && strcmp(utf8_name, propval_updatepipe) == 0) + if (strcmp(utf8_name, propval_updatepipe) == 0) { - /* \todo utf8->local?? */ - sts->decoder=strdup(utf8_value->data); - STOPIF_ENOMEM( !sts->decoder ); - DEBUGP("got a decoder: %s", sts->decoder); - sts->decoder_is_correct=1; + if (action->needs_decoder) + { + /* Currently we assume that programs (update- and commit-pipe) are + * valid regardless of codeset; that wouldn't work as soon as the + * programs' names includes UTF-8. + * + * \todo utf8->local?? */ + sts->decoder=strdup(utf8_value->data); + STOPIF_ENOMEM( !sts->decoder ); + sts->decoder_is_correct=1; + DEBUGP("got a decoder: %s", sts->decoder); + } } - /* for a status-editor (sync) we ignore completely, for an update we - * store them?? */ - /* ignore svn:entry:* properties */ - /* check for is_import_export */ + /* Ignore svn:entry:* properties, but store the updatepipe, too. */ if (!hlp__is_special_property_name(utf8_name)) { sts->remote_status |= FS_PROPERTIES; @@ -327,7 +341,6 @@ loc_name, loc_value); if (not_handled) *not_handled=1; } - goto ex; } @@ -390,47 +403,61 @@ * remove changed data. * * If an entry does not exist (ENOENT), it is ignored. + * + * Only entries that are registered from \a url are removed. + * + * If children that belong to other URLs are found we don't remove the + * directory. + * * \todo conflict */ -int up__rmdir(struct estat *sts) +int up__rmdir(struct estat *sts, struct url_t *url) { - int status, i; + int status, i, has_others; struct estat *cur; char *path; status=0; - + has_others=0; /* Remove children */ for(i=0; ientry_count; i++) { cur=sts->by_inode[i]; - /* Checking the contents of sts here is not allowed any more - - * it may (eg. on update) already contain newer data, and that can be - * anything -- a file, a link, ... 
*/ - /* Just trying the unlink is a single system call, like getting the - * type of the entry with \c lstat(). */ - status=up__unlink(cur, NULL); - if (status == EISDIR) - status=up__rmdir(cur); - - STOPIF( status, "unlink of %s failed", cur->name); - } - - STOPIF( ops__build_path(&path, sts), NULL ); - status = rmdir(path) == -1 ? errno : 0; - - DEBUGP("removing %s: %d", path, status); - if (status == ENOENT) status=0; - STOPIF( status, "Cannot remove directory %s", path); - /** \todo remove the WAA data */ - // STOPIF( waa__delete_byext(fn, NULL, 1), NULL); + if (url && cur->url != url) + has_others++; + else + { + /* TODO: is that true any more? */ + /* Checking the contents of sts here is not allowed any more - + * it may (eg. on update) already contain newer data, and that can be + * anything -- a file, a link, ... */ + /* Just trying the unlink is a single system call, like getting the + * type of the entry with \c lstat(). */ + status=up__unlink(cur, NULL); + if (status == EISDIR) + status=up__rmdir(cur, url); + + STOPIF( status, "unlink of %s failed", cur->name); + } + } + + if (!has_others) + { + STOPIF( ops__build_path(&path, sts), NULL ); + status = rmdir(path) == -1 ? errno : 0; + + DEBUGP("removing %s: %d", path, status); + if (status == ENOENT) status=0; + STOPIF( status, "Cannot remove directory %s", path); + } ex: return status; } + /* The file has current properties, which we'd like * to replace with the saved. * But all not-set properties should not be modified. @@ -504,7 +531,7 @@ /* A chmod or utimes on a symlink changes the *target*, not * the symlink itself. Don't do that. */ - if (!S_ISLNK(sts->st.mode)) + if (!S_ISLNK(sts->updated_mode)) { /* We have a small problem here, in that we cannot change *only* the user * or group. It doesn't matter much; the problem case is that the owner @@ -517,16 +544,10 @@ status=chown(filename, sts->st.uid, sts->st.gid); if (status == -1) { - if (errno == EPERM) - STOPIF( wa__warn( WRN__CHOWN_EPERM, errno, - "Cannot chown \"%s\" to %d:%d", - filename, sts->st.uid, sts->st.gid), - NULL ); - else - STOPIF( wa__warn( WRN__CHOWN_OTHER, errno, - "Cannot chown \"%s\" to %d:%d", - filename, sts->st.uid, sts->st.gid), - NULL ); + STOPIF( wa__warn( errno==EPERM ? WRN__CHOWN_EPERM : WRN__CHOWN_OTHER, + errno, "Cannot chown \"%s\" to %d:%d", + filename, sts->st.uid, sts->st.gid), + NULL ); } } @@ -540,16 +561,10 @@ status=chmod(filename, sts->st.mode & 07777); if (status == -1) { - if (errno == EPERM) - STOPIF( wa__warn( WRN__CHMOD_EPERM, errno, - "Cannot chmod \"%s\" to 0%3o", - filename, sts->st.mode & 07777 ), - NULL ); - else - STOPIF( wa__warn( WRN__CHMOD_OTHER, errno, - "Cannot chmod \"%s\" to 0%3o", - filename, sts->st.mode & 07777 ), - NULL ); + STOPIF( wa__warn( errno == EPERM ? WRN__CHMOD_EPERM : WRN__CHMOD_OTHER, + errno, "Cannot chmod \"%s\" to 0%3o", + filename, sts->st.mode & 07777 ), + NULL ); } } @@ -578,78 +593,9 @@ } -int up__add_entry(struct estat *dir, - const char *path, - const char *copy_path, svn_revnum_t copy_rev, - struct estat **new) -{ - int status; - struct estat *sts; - const char *filename; - - - STOPIF_CODE_ERR(copy_path, EINVAL, - "don't know how to handle copy_path %s@%ld", - copy_path, copy_rev); - - DEBUGP("add entry %s", path); - /* The path should be done by open_directory descending. - * We need only the file name. */ - filename=ops__get_filename(path); - STOPIF( ops__find_entry_byname(dir, filename, &sts, 0), - "cannot lookup entry %s", path); - - /* This file already exists. 
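The "try unlink() first, recurse only on EISDIR" trick described in the up__rmdir() comments saves an lstat() per entry. A standalone sketch of that pattern; remove_directory_recursively() stands in for the up__rmdir()-style recursion and is only declared here, and note that POSIX also allows EPERM for unlink() on a directory (Linux returns EISDIR):

#include <errno.h>
#include <unistd.h>

// Hypothetical helper, standing in for a recursive remover like
// up__rmdir(); not implemented in this sketch.
static int remove_directory_recursively(const char *path);

// Sketch: remove an entry of unknown type with as few syscalls as
// possible - unlink() first, treat EISDIR/EPERM as "it is a directory".
static int remove_entry(const char *path)
{
        if (unlink(path) == 0 || errno == ENOENT)
                return 0;
        if (errno == EISDIR || errno == EPERM)
                return remove_directory_recursively(path);
        return errno;
}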
Should we overwrite it silently? - * Possibly with a --force parameter. - * - * No problem if the entry has just been removed (or replaced). */ - if (sts) - { - /* To say it in another way: We have a problem IF - * - the entry is not ignored AND - * - it is not removed (ie. it exists now) */ - STOPIF_CODE_ERR( sts->entry_type != FT_IGNORE && - (sts->entry_status & FS_REMOVED) != FS_REMOVED, - EEXIST, "file %s already exists", path); - - /* Check for sts->url == NULL? */ - if (!url__current_has_precedence(sts->url)) goto no_prec; - - sts->remote_status=FS_REPLACED; - } - else - { - /* maybe a calloc is faster??? */ - sts=malloc(sizeof(*sts)); - STOPIF_ENOMEM(!sts); - memset(sts, 0, sizeof(*sts)); - STOPIF( ops__new_entries(dir, 1, &sts), NULL); - sts->name=strdup(filename); - STOPIF_ENOMEM(!sts->name); - - sts->parent=dir; - sts->remote_status=FS_NEW; - } - - dir->to_be_sorted=1; - - sts->url=current_url; - sts->entry_type=FT_UNKNOWN; - /* Until we know better: */ - sts->st.mode=0700; - /* To avoid EPERM on chmod() etc. */ - sts->st.uid=getuid(); - sts->st.gid=getgid(); - -no_prec: - *new=sts; - -ex: - return status; -} - - -/* we know it's a special file */ +/** Handling non-file non-directory entries. + * We know it's a special file, but not more; we have to take the filedata + * and retrieve the type. */ int up__handle_special(struct estat *sts, char *path, char *data, @@ -662,25 +608,25 @@ STOPIF( hlp__utf82local(cp, &cp, -1), NULL); sts->stringbuf_tgt=NULL; - sts->stringbuf_src=NULL; + DEBUGP("special %s has mode 0%o", path, sts->updated_mode); /* process */ - switch (sts->entry_type) + switch (sts->updated_mode & S_IFMT) { - case FT_CDEV: - case FT_BDEV: + case S_IFBLK: + case S_IFCHR: STOPIF_CODE_ERR( mknod(path, sts->st.mode, sts->st.rdev) == -1, errno, "mknod(%s)", path) ; break; - case FT_SYMLINK: + case S_IFLNK: STOPIF_CODE_ERR( symlink(cp, path) == -1, errno, "symlink(%s, %s)", cp, path); break; default: STOPIF_CODE_ERR(1, EINVAL, - "what kind of node is this??? (type=0x%X)", sts->entry_type); + "what kind of node is this??? 
(mode=0%o)", sts->updated_mode); } ex: @@ -722,76 +668,24 @@ } -svn_error_t *up__delete_entry(const char *utf8_path, - svn_revnum_t revision UNUSED, - void *parent_baton, - apr_pool_t *pool) -{ - int status, change; - struct estat *dir=parent_baton; - struct estat *sts; - char* path; - - STOPIF( hlp__utf82local(utf8_path, &path, -1), NULL ); - - DEBUGP("deleting entry %s", path); - STOPIF( ops__find_entry_byname(dir, path, &sts, 0), NULL); - - BUG_ON(!sts, "entry %s not found", path); - - STOPIF( ops__build_path(&filename, sts), NULL); - - change=sts->entry_status; - sts->remote_status=FS_REMOVED; - STOPIF( st__rm_status(sts), NULL); - - if (!action->is_compare) - { - /* If the entry was not already removed, we have to do that */ - if ((change & FS_REPLACED) != FS_REMOVED) - { - if (S_ISDIR(sts->st.mode)) - STOPIF( up__rmdir(sts), NULL); - else - { - STOPIF( up__unlink(sts, filename), NULL); - STOPIF( waa__delete_byext(filename, WAA__FILE_MD5s_EXT, 1), NULL); - STOPIF( waa__delete_byext(filename, WAA__PROP_EXT, 1), NULL); - } - } - - if (sts) - STOPIF( ops__delete_entry(dir, sts, - UNKNOWN_INDEX, UNKNOWN_INDEX), NULL); - } - - -ex: - RETURN_SVNERR(status); -} - - svn_error_t *up__add_directory(const char *utf8_path, void *parent_baton, const char *utf8_copy_path, svn_revnum_t copy_rev, - apr_pool_t *dir_pool, + apr_pool_t *dir_pool UNUSED, void **child_baton) { struct estat *dir=parent_baton; struct estat *sts; int status; char* path; - char* copy_path; - STOPIF( hlp__utf82local(utf8_path, &path, -1), NULL ); - STOPIF( hlp__utf82local(utf8_copy_path, ©_path, -1), NULL ); - STOPIF( up__add_entry(dir, path, copy_path, copy_rev, &sts), NULL ); + STOPIF( cb__add_entry(dir, utf8_path, &path, utf8_copy_path, + copy_rev, S_IFDIR, NULL, 1, + child_baton), NULL ); + sts=(struct estat*)*child_baton; - sts->entry_type |= FT_DIR; - *child_baton = sts; - sts->dir_pool=dir_pool; if (!action->is_compare) { /* this must be done immediately, because subsequent accesses may @@ -813,34 +707,6 @@ -svn_error_t *up__open_directory(const char *utf8_path, - void *parent_baton, - svn_revnum_t base_revision UNUSED, - apr_pool_t *dir_pool, - void **child_baton) -{ - struct estat *dir=parent_baton; - struct estat *sts; - int status; - char* path; - - STOPIF( hlp__utf82local(utf8_path, &path, -1), NULL ); - - status=0; - STOPIF( ops__find_entry_byname(dir, path, &sts, 0), - "cannot find entry %s", path); - - - if (!dir) status=ENOENT; - - *child_baton = sts; - sts->dir_pool=dir_pool; - -ex: - RETURN_SVNERR(status); -} - - svn_error_t *up__change_dir_prop(void *dir_baton, const char *utf8_name, const svn_string_t *value, @@ -904,50 +770,10 @@ void **file_baton) { struct estat *dir=parent_baton; - struct estat *sts; int status; - char* path; - char* copy_path; - - STOPIF( hlp__utf82local(utf8_path, &path, -1), NULL ); - STOPIF( hlp__utf82local(utf8_copy_path, ©_path, -1), NULL ); - - STOPIF( up__add_entry(dir, path, copy_path, copy_rev, &sts), - NULL); - sts->entry_type = FT_FILE; - *file_baton = sts; - -ex: - RETURN_SVNERR(status); -} - - -svn_error_t *up__open_file(const char *utf8_path, - void *parent_baton, - svn_revnum_t base_revision, - apr_pool_t *file_pool, - void **file_baton) -{ - int status; - struct estat *dir UNUSED=parent_baton; - struct estat *sts; - char* path; - - STOPIF( hlp__utf82local(utf8_path, &path, -1), NULL ); - - STOPIF( ops__find_entry_byname(dir, path, &sts, 0), NULL); - - - if (!sts) - status=ENOENT; - else - /* In this call-chain we get the text-base directly, so we have to look - * on 
*opening* the file. */ - STOPIF( up__fetch_decoder(sts), NULL); - - - *file_baton = sts; + STOPIF( cb__add_entry(dir, utf8_path, NULL, utf8_copy_path, copy_rev, + S_IFREG, NULL, 1, file_baton), NULL); ex: RETURN_SVNERR(status); @@ -967,10 +793,12 @@ char* fn_utf8; apr_file_t *source, *target; struct encoder_t *encoder; + svn_stringbuf_t *stringbuf_src; - STOPIF( ops__build_path(&filename, sts), NULL); + stringbuf_src=NULL; encoder=NULL; + STOPIF( ops__build_path(&filename, sts), NULL); if (action->is_compare) { @@ -1004,13 +832,13 @@ strcpy(filename_tmp, filename); strcat(filename_tmp, ".up.tmp"); - DEBUGP("target is %s,", filename); + DEBUGP("target is %s (0%o),", filename, sts->updated_mode); DEBUGP(" temp is %s", filename_tmp); - if (sts->entry_type != FT_FILE) + if (!S_ISREG(sts->updated_mode)) { /* special entries are taken into a svn_stringbuf_t */ - if (S_ISLNK(sts->st.mode)) + if (S_ISLNK(sts->updated_mode)) { STOPIF( ops__link_to_string(sts, filename, &cp), NULL); @@ -1020,10 +848,10 @@ cp=ops__dev_to_filedata(sts); into_stringbufs: - sts->stringbuf_src=svn_stringbuf_create(cp, pool); + stringbuf_src=svn_stringbuf_create(cp, pool); sts->stringbuf_tgt=svn_stringbuf_create("", pool); - svn_s_src=svn_stream_from_stringbuf(sts->stringbuf_src, pool); + svn_s_src=svn_stream_from_stringbuf(stringbuf_src, pool); svn_s_tgt=svn_stream_from_stringbuf(sts->stringbuf_tgt, pool); status=0; } @@ -1056,7 +884,9 @@ * be used, as this is already destroyed by the time we get to * up__close_file, and an apr_pool_clear() then results in a segfault. * So we have to take the directories' pool. */ - STOPIF( apr_pool_create(&(sts->filehandle_pool), sts->parent->dir_pool), + /* We take a subpool of the global pool; that takes (tested) nearly + * resources, as it's destroyed in close_file(). */ + STOPIF( apr_pool_create(&(sts->filehandle_pool), global_pool), "Creating the filehandle pool"); /* If the file is new, has changed or is removed, @@ -1086,7 +916,7 @@ if (sts->decoder) { STOPIF( hlp__encode_filter(svn_s_tgt, sts->decoder, 1, - &svn_s_tgt, &encoder, sts->filehandle_pool), NULL); + filename, &svn_s_tgt, &encoder, sts->filehandle_pool), NULL); /* If the file gets decoded, use the original MD5 for comparision. */ encoder->output_md5= &(sts->md5); } @@ -1141,19 +971,16 @@ else { /* now we have a new md5 */ - DEBUGP("close file: md5=%s", - cs__md52hex(sts->md5)); + DEBUGP("close file (0%o): md5=%s", + sts->updated_mode, cs__md52hex(sts->md5)); - /* if there's no special property, it's an ordinary file. */ - if (sts->entry_type == FT_NONDIR) - sts->entry_type = FT_FILE; + BUG_ON(!sts->updated_mode); - - if (sts->entry_type == FT_FILE) + if (S_ISREG(sts->updated_mode)) { status=0; - /* See the comment in up__open_file, mark FHP. */ + /* See the comment mark FHP. */ /* This may be NULL if we got only property-changes, no file * data changes. 
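For special entries (symlinks, devices) the patch collects the incoming data in an svn_stringbuf_t and wraps it as streams for the delta machinery instead of using a temporary file. A minimal standalone sketch of that wrapping, using only the public Subversion API; the "old representation" placeholder is made up:

#include <svn_string.h>
#include <svn_io.h>

// Sketch: in-memory source/target for a small entry; after the delta is
// applied, (*tgt_buf)->data holds the new representation of the entry.
static void make_special_streams(apr_pool_t *pool,
        svn_stream_t **src_stream, svn_stream_t **tgt_stream,
        svn_stringbuf_t **tgt_buf)
{
        svn_stringbuf_t *src = svn_stringbuf_create("old representation", pool);

        *tgt_buf = svn_stringbuf_create("", pool);
        *src_stream = svn_stream_from_stringbuf(src, pool);
        *tgt_stream = svn_stream_from_stringbuf(*tgt_buf, pool);
}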
*/ if (sts->filehandle_pool) @@ -1166,6 +993,7 @@ } else { + DEBUGP("closing special file"); sts->stringbuf_tgt->data[ sts->stringbuf_tgt->len ]=0; STOPIF( up__handle_special(sts, filename_tmp, sts->stringbuf_tgt->data, pool), NULL); @@ -1222,31 +1050,6 @@ return SVN_NO_ERROR; } - - -const svn_delta_editor_t up__update_editor = -{ - .set_target_revision = up__set_target_revision, - - .open_root = up__open_root, - - .delete_entry = up__delete_entry, - .add_directory = up__add_directory, - .open_directory = up__open_directory, - .change_dir_prop = up__change_dir_prop, - .close_directory = up__close_directory, - .absent_directory = up__absent_directory, - - .add_file = up__add_file, - .open_file = up__open_file, - .apply_textdelta = up__apply_textdelta, - .change_file_prop = up__change_file_prop, - .close_file = up__close_file, - .absent_file = up__absent_file, - - .close_edit = up__close_edit, - .abort_edit = up__abort_edit, -}; /* ---CUT--- end of delta-editor */ @@ -1297,7 +1100,6 @@ int status; svn_error_t *status_svn; svn_revnum_t rev; - int i; time_t delay_start; @@ -1309,13 +1111,15 @@ STOPIF_CODE_ERR(!urllist_count, EINVAL, "There's no URL defined"); - + STOPIF( url__mark_todo(), NULL); STOPIF_CODE_ERR( argc != 0, EINVAL, "Cannot do partial updates!"); - if (!opt_checksum) opt_checksum++; + + opt__set_int(OPT__CHANGECHECK, PRIO_MUSTHAVE, + opt__get_int(OPT__CHANGECHECK) | CHCHECK_FILE); only_check_status=1; /* Do that here - if some other checks fail, it won't take so long @@ -1323,32 +1127,9 @@ STOPIF( waa__read_or_build_tree(root, argc, argv, argv, NULL, 0), NULL); only_check_status=0; - for(i=0; iurl); - - STOPIF( url__open_session(&session), NULL); - - if (opt_target_revisions_given) - rev=opt_target_revision; - else - rev=current_url->target_rev; - - /* Giving a simple SVN_INVALID_REVNUM to ->set_path() doesn't work - - * we get an error "Bogus revision report". Get the real HEAD. */ - /* \todo: get latest for each url, let user specify rev for each url */ - if (rev == SVN_INVALID_REVNUM) - { - STOPIF_SVNERR( svn_ra_get_latest_revnum, - (session, &rev, global_pool)); - DEBUGP("HEAD is at %ld", rev); - } - - - STOPIF( cb__record_changes(session, root, rev, global_pool), NULL); + STOPIF( cb__record_changes(root, rev, global_pool), NULL); if (action->is_compare) { @@ -1361,10 +1142,12 @@ DEBUGP("setting revision to %llu", (t_ull)rev); STOPIF( ci__set_revision(root, rev), NULL); - printf("Updated %s to revision\t%ld.\n", + printf("Updating %s to revision\t%ld.\n", current_url->url, rev); } } + STOPIF_CODE_ERR( status != EOF, status, NULL); + status=0; if (action->is_compare) { @@ -1372,7 +1155,7 @@ else { DEBUGP("fetching from repository"); - STOPIF( rev__do_changed(session, root, global_pool), NULL); + STOPIF( rev__do_changed(root, global_pool), NULL); /* See the comment at the end of commit.c - atomicity for writing * these files. */ diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/update.h /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/update.h --- fsvs-1.1.14/src/update.h 2008-02-19 05:57:01.000000000 +0000 +++ fsvs-1.1.17/src/update.h 2008-06-13 07:19:24.000000000 +0100 @@ -14,8 +14,6 @@ /** \file * \ref update action header file. */ -/** Progress callback. */ -action_t up__action; /** Main \ref update worker function. */ work_t up__work; @@ -31,13 +29,6 @@ int up__set_meta_data(struct estat *sts, const char *filename); -/** Insert a new entry into the structures. 
*/ -int up__add_entry(struct estat *dir, - const char *path, - const char *copy_path, svn_revnum_t copy_rev, - struct estat **new); - - /** \name The delta-editor functions. * These are being used for remote-status. */ /** @{ */ @@ -48,21 +39,12 @@ svn_revnum_t base_revision, apr_pool_t *dir_pool UNUSED, void **root_baton); -svn_error_t *up__delete_entry(const char *path, - svn_revnum_t revision UNUSED, - void *parent_baton, - apr_pool_t *pool); svn_error_t *up__add_directory(const char *path, void *parent_baton, const char *copy_path, svn_revnum_t copy_rev, apr_pool_t *dir_pool UNUSED, void **child_baton); -svn_error_t *up__open_directory(const char *path, - void *parent_baton, - svn_revnum_t base_revision UNUSED, - apr_pool_t *dir_pool UNUSED, - void **child_baton); svn_error_t *up__change_dir_prop(void *dir_baton, const char *name, const svn_string_t *value, @@ -79,11 +61,6 @@ svn_revnum_t copy_rev, apr_pool_t *file_pool, void **file_baton); -svn_error_t *up__open_file(const char *path, - void *parent_baton, - svn_revnum_t base_revision, - apr_pool_t *file_pool, - void **file_baton); svn_error_t *up__apply_textdelta(void *file_baton, const char *base_checksum, apr_pool_t *pool, @@ -110,10 +87,8 @@ char *data, apr_pool_t *pool); -extern const svn_delta_editor_t up__update_editor; - int up__unlink(struct estat *sts, char *filename); -int up__rmdir(struct estat *sts); +int up__rmdir(struct estat *sts, struct url_t *url); int up__fetch_decoder(struct estat *sts); #endif diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/url.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/url.c --- fsvs-1.1.14/src/url.c 2008-03-17 06:07:07.000000000 +0000 +++ fsvs-1.1.17/src/url.c 2008-10-25 12:14:21.000000000 +0100 @@ -12,6 +12,7 @@ #include #include + #include "url.h" #include "waa.h" #include "cache.h" @@ -156,8 +157,8 @@ * Using the target revision you can tell fsvs that it should use the given * revision number as destination revision - so update would go there, but * not further. - * Please note that the given revision number overrides the \c -r parameter - * - which sets the destination for all URLs. + * Please note that the given revision number overrides the \c -r + * parameter; this sets the destination for all URLs. * * The default target is \c HEAD. * @@ -190,8 +191,7 @@ * Currently we use about 92 bytes per entry. So we'd (unnecessarily) * increase the size by about 10%. * - * That's why there's an internal_number. - * */ + * That's why there's an url_t::internal_number. */ /** -. @@ -205,6 +205,13 @@ +/** Returns whether \a url should be handled. */ +static inline int url___to_be_handled(const struct url_t *url) +{ + return (!url__parm_list_used) || url->to_be_handled; +} + + /** -. 
* * Because this may be called below input_tree, returning \c ENOENT could @@ -336,6 +343,8 @@ eurl.internal_number=INVALID_INTERNAL_NUMBER; eurl.current_rev=0; eurl.target_rev=SVN_INVALID_REVNUM; + eurl.current_target_override=0; + eurl.head_rev=SVN_INVALID_REVNUM; cur=input; DEBUGP("input: %s", input); @@ -859,6 +868,8 @@ { int status; svn_error_t *status_svn; + apr_hash_t *cfg; + status=0; if (!current_url->pool) @@ -868,15 +879,17 @@ "no pool"); } + STOPIF( hlp__get_svn_config(&cfg), NULL); + /** Try svn_ra_reparent() */ if (!current_url->session) { - STOPIF_SVNERR_EXTRA( svn_ra_open, + STOPIF_SVNERR_TEXT( svn_ra_open, (& current_url->session, current_url->url, &cb__cb_table, NULL, /* cbtable, cbbaton, */ - NULL, /* config hash */ + cfg, /* config hash */ current_url->pool), - "Opening URL '%s' brought an error:", current_url->url); + "svn_ra_open(\"%s\")", current_url->url); if (session) *session = current_url->session; @@ -924,10 +937,13 @@ /** -. - * */ + * If an entry has \b no URL yet (is new), \a to_compare is \c NULL, and + * the \ref current_url has higher priority; this is common, and so done + * here too. */ int url__current_has_precedence(struct url_t *to_compare) { - return (current_url->priority <= to_compare->priority); + return to_compare==NULL || + (current_url->priority <= to_compare->priority); } @@ -960,25 +976,25 @@ switch (cp[1]) { case '\\': - status= EOF == fputc('\\', output); + STOPIF_CODE_EPIPE( fputc('\\', output), NULL); break; case 'n': - status= EOF == fputc('\n', output); + STOPIF_CODE_EPIPE( fputc('\n', output), NULL); break; case 'r': - status= EOF == fputc('\r', output); + STOPIF_CODE_EPIPE( fputc('\r', output), NULL); break; case 't': - status= EOF == fputc('\t', output); + STOPIF_CODE_EPIPE( fputc('\t', output), NULL); break; case 'f': - status= EOF == fputc('\f', output); + STOPIF_CODE_EPIPE( fputc('\f', output), NULL); break; case 'x': status= cp[2] && cp[3] ? cs__two_ch2bin(cp+2) : -1; STOPIF_CODE_ERR(status <0, EINVAL, "A \"\\x\" sequence must have 2 hex digits."); - status= EOF == fputc(status, output); + STOPIF_CODE_EPIPE( fputc(status, output), NULL); /* There's a +2 below. */ cp+=2; break; @@ -996,24 +1012,27 @@ { /* Allow internal number, too? */ case 'n': - status= EOF == fputs(url->name ?: "", output); + STOPIF_CODE_EPIPE( fputs(url->name ?: "", output), NULL); break; case 't': - status= EOF == fputs(hlp__rev_to_string(url->target_rev), - output); + STOPIF_CODE_EPIPE( fputs( + hlp__rev_to_string(url->target_rev), + output), NULL); break; case 'r': - status= EOF == fputs( hlp__rev_to_string(url->current_rev), - output); + STOPIF_CODE_EPIPE( fputs( + hlp__rev_to_string(url->current_rev), + output), NULL); break; case 'p': - status= 0 > fprintf(output, "%u", url->priority); + STOPIF_CODE_EPIPE( fprintf(output, "%u", + url->priority), NULL); break; case 'u': - status= EOF == fputs(url->url, output); + STOPIF_CODE_EPIPE( fputs(url->url, output), NULL); break; case '%': - status= EOF == fputc('%', output); + STOPIF_CODE_EPIPE( fputc('%', output), NULL); break; default: STOPIF_CODE_ERR(1, EINVAL, @@ -1025,26 +1044,15 @@ break; default: - status= EOF == fputc(*cp, output); + STOPIF_CODE_EPIPE( fputc(*cp, output), NULL); cp++; } - - if (status) - { - status=errno; - - /* Quit silently. 
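cs__two_ch2bin(), used above for the "\x" escape, is treated as a black box here; decoding two hex digits into a byte is standard fare. A sketch of what such a helper typically looks like (the real fsvs function may differ):

// Sketch: return -1 unless both characters are hex digits.
static int hex_nibble(char c)
{
        if (c >= '0' && c <= '9') return c - '0';
        c |= 0x20;                      // fold to lower case
        if (c >= 'a' && c <= 'f') return c - 'a' + 10;
        return -1;
}

static int two_hex_to_byte(const char *s)
{
        int hi = hex_nibble(s[0]);
        int lo = hex_nibble(s[1]);

        return (hi < 0 || lo < 0) ? -1 : (hi << 4) | lo;
}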
*/ - if (status == EPIPE) - status=0; - - goto ex; - - } } } -ex: + status=0; +ex: return status; } @@ -1246,10 +1254,12 @@ DEBUGP("URL %s mentioned multiple times", url->url); url->to_be_handled=1; - /* TODO: That should be better; -r should override given URLs without - * explicit revision. */ if (rev_str) - STOPIF( hlp__parse_rev(rev_str, NULL, & url->target_rev), NULL); + { + STOPIF( hlp__parse_rev(rev_str, NULL, + & url->current_target_rev), NULL); + url->current_target_override=1; + } url_string=strtok(NULL, delim); } @@ -1284,3 +1294,101 @@ ex: return status; } + + +/** -. + * + * DAV (http:// and https://) don't like getting \c + * SVN_INVALID_REVNUM on some operations; they throw an 175007 "HTTP + * Path Not Found", and "REPORT request failed on '...'". + * + * So we need the real \c HEAD. + * + * We try to be fast, and only fetch the value if we really need it. */ +int url__canonical_rev( struct url_t *url, svn_revnum_t *rev) +{ + int status; + svn_error_t *status_svn; + + + status=0; + status_svn=NULL; + if (*rev == SVN_INVALID_REVNUM) + { + if (url->head_rev == SVN_INVALID_REVNUM) + { + BUG_ON( !url->session ); + /* As we ask at most once we just use the connection's pool - that + * has to exist if there's a session. */ + STOPIF_SVNERR( svn_ra_get_latest_revnum, + (url->session, & url->head_rev, url->pool)); + + DEBUGP("HEAD of %s is at %ld", url->url, url->head_rev); + } + + *rev=url->head_rev; + } + + +ex: + return status; +} + + +/** -. + * Returns 0 as long as there's an URL to process; \c current_url is set, + * and opened. In \a target_rev the target revision (as per default of this + * URL, or as given by the user) is returned. + * + * If called with \a target_rev \c NULL, the internal index is reset, and + * no URL initialization is done. + * + * At the end of the list \c EOF is given. + * */ +int url__iterator(svn_revnum_t *target_rev) // , svn_revnum_t *cur_rev) +{ + int status; + static int last_index=-1; + svn_revnum_t rev; + + + status=0; + if (!target_rev) + { + last_index=-1; + goto ex; + } + + while (1) + { + last_index++; + if (last_index >= urllist_count) + { + DEBUGP("no more URLs."); + /* No more data. */ + status=EOF; + goto ex; + } + + current_url=urllist[last_index]; + if (url___to_be_handled(current_url)) break; + } + + STOPIF( url__open_session(NULL), NULL); + + + if (current_url->current_target_override) + rev=current_url->current_target_rev; + else if (opt_target_revisions_given) + rev=opt_target_revision; + else + rev=current_url->target_rev; + DEBUGP("doing URL %s @ %llu", current_url->url, (t_ull)rev); + + STOPIF( url__canonical_rev(current_url, &rev), NULL); + *target_rev = rev; + +ex: + return status; +} + diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/url.h /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/url.h --- fsvs-1.1.14/src/url.h 2008-03-10 07:57:01.000000000 +0000 +++ fsvs-1.1.17/src/url.h 2008-06-09 06:08:45.000000000 +0100 @@ -67,9 +67,13 @@ int url__store_url_name(char *parm); -static inline int url__to_be_handled(const struct url_t *url) -{ - return (!url__parm_list_used) || url->to_be_handled; -} +/** Simple function setting \c current_url, and returning whether there's + * something to do. */ +int url__iterator(svn_revnum_t *target_rev); + + +/** Changes the revision number, if \c SVN_INVALID_REVNUM, to the real + * value. 
*/ +int url__canonical_rev( struct url_t *url, svn_revnum_t *rev); #endif diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/.vimrc /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/.vimrc --- fsvs-1.1.14/src/.vimrc 1970-01-01 01:00:00.000000000 +0100 +++ fsvs-1.1.17/src/.vimrc 2008-12-15 09:12:09.000000000 +0000 @@ -0,0 +1 @@ +:au BufNewFile,BufRead *.c syntax keyword Constant diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/waa.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/waa.c --- fsvs-1.1.14/src/waa.c 2008-03-25 06:23:26.000000000 +0000 +++ fsvs-1.1.17/src/waa.c 2008-10-25 12:16:34.000000000 +0100 @@ -24,6 +24,7 @@ #include "direnum.h" #include "options.h" #include "add_unvers.h" +#include "cache.h" #include "checksum.h" #include "helper.h" #include "global.h" @@ -51,19 +52,12 @@ /** The extension temporary files in the WAA get. */ static const char ext_tmp[]=".tmp"; -/** The base path of the WAA. */ -static char const *waa_path; -/** The length of \ref waa_path. */ -static int waa_len; -/** The base path of the configuration area. */ -static char const *conf_path; -/** The length of \ref conf_path. */ -static int conf_len; /** -. - * They are long enough to hold \ref waa_path plus the 3-level deep - * subdirectory structure for cache and data files. - * The \ref conf_path plus additional data gets it own buffers. + * They are long enough to hold the \ref OPT__WAA_PATH "waa path" plus the + * 3-level deep subdirectory structure for cache and data files. + * The \ref OPT__CONF_PATH "conf path" plus additional data gets it own + * buffers. * @{ */ char *waa_tmp_path, *waa_tmp_fn, *conf_tmp_path, *conf_tmp_fn; @@ -126,15 +120,16 @@ /** Convenience function for creating two paths. */ -inline void waa___init_path(char *dest, const char *const src, - int *len, char **eos) +inline void waa___init_path(enum opt__settings_e which, + char *dest, char **eos) { int l; l=0; if (strncmp(opt__get_string(OPT__SOFTROOT), - src, opt__get_int(OPT__SOFTROOT)) != 0 ) + opt__get_string(which), + opt__get_int(OPT__SOFTROOT)) != 0 ) { strcpy(dest, opt__get_string(OPT__SOFTROOT)); l=opt__get_int(OPT__SOFTROOT); @@ -143,7 +138,7 @@ dest[l++]=PATH_SEPARATOR; } - l+= strlen( strcpy(dest+l, src) ); + l+= strlen( strcpy(dest+l, opt__get_string(which) ) ); /* ensure a delimiter */ if (dest[l-1] != PATH_SEPARATOR) @@ -153,51 +148,60 @@ } *eos=dest + l; - *len=l; + opt__set_int(which, PRIO_MUSTHAVE, l); } + /** -. * If not a WAA-less operation, find the WAA and define an ignore * pattern. */ int waa__init(void) { int status; + char *cp; + int len; status=0; /* If we're doing an import/export operation, we must not use the waa * area. We may be running off a KNOPPIX CD, or whatever. * - * What we *need* is the conf directory ... it might have options for us. - * - * So waa_path is NULL, and serves as a validation point - every access - * tried will get a SEGV and can be debugged. */ - conf_path=getenv(CONF__PATH_ENV); - if (!conf_path ) conf_path="/etc/fsvs"; + * What we *need* is the conf directory ... it might have options for us. + * */ - /* at least /w or some such */ - conf_len=strlen(conf_path); - STOPIF_CODE_ERR( conf_len<3, EINVAL, - "environment variable " CONF__PATH_ENV " should be set to a directory"); + /** \todo remove when gcc doesn't warn about \c strlen("const") + * initializers. See debian bug #60xxxx. + * And see below for WAA_PATH, too. 
*/ + if (opt__get_int(OPT__CONF_PATH)==0) + { + opt__set_string(OPT__CONF_PATH, PRIO_MUSTHAVE, DEFAULT_CONF_PATH); + opt__set_int(OPT__CONF_PATH, PRIO_MUSTHAVE, strlen(DEFAULT_CONF_PATH)); + } - if (!action->is_import_export) - { - waa_path=getenv(WAA__PATH_ENV); - if (!waa_path ) waa_path="/var/spool/fsvs"; + /* at least /w or some such */ + STOPIF_CODE_ERR( opt__get_int(OPT__CONF_PATH)<3, EINVAL, + "The CONF path is invalid; a (non-root) path is expected."); - waa_len=strlen(waa_path); - STOPIF_CODE_ERR( waa_len<3, EINVAL, - "environment variable " WAA__PATH_ENV " should be set to a directory"); - /* validate existence and save dev/inode for later checking */ - STOPIF_CODE_ERR( hlp__lstat(waa_path, &waa_stat) == -1, errno, - "stat() of waa-path '%s' failed. " - "Does your local storage area exist? ", waa_path); - DEBUGP("got the WAA as inode %llu", (t_ull)waa_stat.ino); + if (action->is_import_export) + { + /* So the WAA path is NULL, and serves as a validation point - every + * access tried will get a SEGV and can be debugged. */ + opt__set_string(OPT__WAA_PATH, PRIO_MUSTHAVE, NULL); + opt__set_int(OPT__WAA_PATH, PRIO_MUSTHAVE, 0); } else - waa_len=0; + { + if (opt__get_int(OPT__WAA_PATH)==0) + { + opt__set_string(OPT__WAA_PATH, PRIO_MUSTHAVE, DEFAULT_WAA_PATH); + opt__set_int(OPT__WAA_PATH, PRIO_MUSTHAVE, strlen(DEFAULT_WAA_PATH)); + } + + STOPIF_CODE_ERR( opt__get_int(OPT__WAA_PATH)<3, EINVAL, + "The WAA path should be set to a directory below \"/\"."); + } /* This memory has lifetime of the process. @@ -205,22 +209,66 @@ * The memory allocated is enough for the longest possible path. */ waa_tmp_path_len= opt__get_int(OPT__SOFTROOT) + 1 + - (waa_len > conf_len ? waa_len : conf_len) + 1 + + ( max(opt__get_int(OPT__WAA_PATH), + opt__get_int(OPT__CONF_PATH)) ) + 1 + APR_MD5_DIGESTSIZE*2 + 3 + WAA__MAX_EXT_LENGTH + strlen(ext_tmp) + 1 +4; DEBUGP("using %d bytes for temporary WAA+conf paths", waa_tmp_path_len); + + /* Here the paths are set at highest priority, so they can't get changed + * afterwards. */ conf_tmp_path=malloc(waa_tmp_path_len); STOPIF_ENOMEM(!conf_tmp_path); - waa___init_path(conf_tmp_path, conf_path, &conf_len, &conf_tmp_fn); + waa___init_path(OPT__CONF_PATH, conf_tmp_path, &conf_tmp_fn); if (!action->is_import_export) { waa_tmp_path=malloc(waa_tmp_path_len); STOPIF_ENOMEM(!waa_tmp_path); - waa___init_path(waa_tmp_path, waa_path, &waa_len, &waa_tmp_fn); - } + waa___init_path(OPT__WAA_PATH, waa_tmp_path, &waa_tmp_fn); + + /* validate existence and save dev/inode for later checking */ + STOPIF( hlp__lstat(waa_tmp_path, &waa_stat), + "!stat() of waa-path \"%s\" failed. " + "Does your local WAA storage area exist? ", + waa_tmp_path); + DEBUGP("got the WAA as inode %llu", (t_ull)waa_stat.ino); + + /* Only check whether it's there. */ + STOPIF_CODE_ERR( access(conf_tmp_path, W_OK)==-1, errno, + "!Cannot write to the FSVS_CONF path \"%s\".", + conf_tmp_path); + } + + /* Now no more changes of the softroot (eg. via the per-WC configuration) + * are allowed. */ + opt__set_int( OPT__SOFTROOT, PRIO_MUSTHAVE, + opt__get_int(OPT__SOFTROOT)); + setenv(opt__variable_from_option(OPT__SOFTROOT), + opt__get_string(OPT__SOFTROOT), 1); + + + /* Subversion doesn't like "//" in pathnames - even if it's just the + * local configuration area. So we have to normalize them. */ + len = opt__get_int(OPT__CONFIG_DIR)==0 ? 
+ opt__get_int(OPT__CONF_PATH)+strlen(DEFAULT_CONFIGDIR_SUB)+1 : + opt__get_int(OPT__CONFIG_DIR); + cp=malloc(len+5); + STOPIF_ENOMEM(!cp); + + if (opt__get_int(OPT__CONFIG_DIR)==0) + hlp__pathcopy(cp, &len, + opt__get_string(OPT__CONF_PATH), DEFAULT_CONFIGDIR_SUB, NULL); + else + hlp__pathcopy(cp, &len, + opt__get_string(OPT__CONFIG_DIR), NULL); + + opt__set_string(OPT__CONFIG_DIR, PRIO_MUSTHAVE, cp); + opt__set_int(OPT__CONFIG_DIR, PRIO_MUSTHAVE, len); + + ex: return status; @@ -279,15 +327,29 @@ /** -. + * + * \note The mask used is \c 0777 - so mind your umask! */ +int waa__mkdir(char *dir, int including_last) +{ + int status; + STOPIF( waa__mkdir_mask(dir, including_last, 0777), NULL); +ex: + return status; +} + + +/** -. + * * If it already exists, no error is returned. * * If needed, the structure is generated recursively. - * With \a including_last being \c 0 you can give a filename, and make sure - * that the directories up to there are created. - * * - * \note The mask used is \c 0777 - so mind your umask! */ -int waa__mkdir(char *dir, int including_last) + * With \a including_last being \c 0 you can give a filename, and make sure + * that the directories up to there are created. Because of this we can't + * use \c apr_dir_make_recursive() - We'd have to cut the filename away, + * and this is done here anyway. + * */ +int waa__mkdir_mask(char *dir, int including_last, int mask) { int status; char *last_ps; @@ -314,12 +376,17 @@ DEBUGP("%s: last is %d", dir, including_last); /* Now the parent was done ... so we should not get ENOENT again. */ if (including_last) - STOPIF_CODE_ERR( mkdir(dir, 0777) == -1, errno, + STOPIF_CODE_ERR( mkdir(dir, mask & 07777) == -1, errno, "cannot mkdir(%s)", dir); } else STOPIF(status, "cannot lstat(%s)", dir); } + else + { + STOPIF_CODE_ERR( including_last && !S_ISDIR(buf.st_mode), ENOTDIR, + "\"%s\" is not a directory", dir); + } ex: return status; @@ -709,11 +776,14 @@ /** -. * - * If the \c unlink()-call succeeds, the directory levels above are removed, - * if possible. + * If the \c unlink()-call succeeds, the (max. 2) directory levels above + * are removed, if possible. * * Via the parameter \a ignore_not_exist the caller can say whether a * \c ENOENT should be returned silently. + * + * If \a extension is \c NULL, the given path already specifies a file, and + * is not converted into a WAA path. * * \see waa_files. */ int waa__delete_byext(char *path, @@ -724,9 +794,18 @@ char *cp, *eos; status=0; - STOPIF( waa__get_waa_directory(path, &cp, &eos, NULL, - waa__get_gwd_flag(extension)), NULL); - strcpy(eos, extension); + if (extension) + { + STOPIF( waa__get_waa_directory(path, &cp, &eos, NULL, + waa__get_gwd_flag(extension)), NULL); + strcpy(eos, extension); + } + else + { + cp=path; + eos=strrchr(cp, PATH_SEPARATOR); + BUG_ON(!eos); + } if (unlink(cp) == -1) { @@ -804,7 +883,7 @@ /** -. * * All entries are defined as new. 
*/ -int waa__build_tree(struct estat *root) +int waa__build_tree(struct estat *dir) { int status; struct estat *sts; @@ -812,22 +891,22 @@ status=0; /* no stat info on first iteration */ - STOPIF( waa__dir_enum( root, 0, 0), NULL); + STOPIF( waa__dir_enum( dir, 0, 0), NULL); - DEBUGP("found %d entries ...", root->entry_count); + DEBUGP("found %d entries ...", dir->entry_count); have_ignored=0; have_found=0; - for(i=0; ientry_count; i++) + for(i=0; ientry_count; i++) { - sts=root->by_inode[i]; + sts=dir->by_inode[i]; STOPIF( ign__is_ignore(sts, &ignore), NULL); if (ignore>0) { DEBUGP("ignoring entry %s", sts->name); - sts->entry_type=FT_IGNORE; + sts->to_be_ignored=1; have_ignored=1; continue; } @@ -835,14 +914,13 @@ sts->path_level = sts->parent->path_level+1; /* in build_tree, it must be a new entry. */ sts->entry_status=FS_NEW; + STOPIF( ops__set_todo_bits(sts), NULL); approx_entry_count++; have_found++; if (S_ISDIR(sts->st.mode)) { - sts->entry_type=FT_DIR; - - if (opt_recursive>0) + if (ops__are_children_interesting(sts)) { STOPIF_CODE_ERR( chdir(sts->name) == -1, errno, "chdir(%s)", sts->name); @@ -854,20 +932,16 @@ "parent has gone"); } } - else - { - sts->entry_type=ops___filetype(&(sts->st)); - } STOPIF( ac__dispatch(sts), NULL); } if (have_ignored) /* Delete per index faster */ - STOPIF( ops__free_marked(root, 0), NULL); + STOPIF( ops__free_marked(dir, 0), NULL); if (have_found) - root->entry_status |= FS_CHANGED | FS_CHILD_CHANGED; + ops__mark_changed_parentcc(dir, entry_status); ex: return status; @@ -918,7 +992,7 @@ /* order is wrong - find new place for this element. */ bigger_eq=count-1; /* i is a smaller element, k a possibly higher */ -#ifdef DEBUG +#if 0 if (1) { char tmp[count*(18+1)+10]; @@ -1215,7 +1289,7 @@ { /* It's easy and possible to have always the correct number * of subdirectories in root->subdir_count. We'd just have - * to walk up to the root in build_tree and add_directory + * to walk up to the root in waa__build_tree and add_directory * and increment the number there. * * But @@ -1303,9 +1377,11 @@ * been (shallowly!) read - so subdirectories might not yet be up-to-date * yet. * - * The estat::do_this_entry and estat::do_tree flags are set, and depending - * on them (and opt_recursive) estat::entry_status is set. - * */ + * The estat::do_this_entry and estat::do_userselected flags are set, and + * depending on them (and opt_recursive) estat::entry_status is set. + * + * On \c chdir() an eventual \c EACCES is ignored, and the "maybe changed" + * status returned. */ int waa__update_dir(struct estat *old) { int dir_hdl, status; @@ -1326,11 +1402,15 @@ /* To avoid storing arbitrarily long pathnames, we just open this * directory and do a fchdir() later. */ dir_hdl=open(".", O_RDONLY | O_DIRECTORY); - STOPIF_CODE_ERR( dir_hdl==-1, errno, "saving current directory with open(.)"); + STOPIF_CODE_ERR( dir_hdl==-1, errno, + "saving current directory with open(.)"); DEBUGP("update_dir: chdir(%s)", path); - STOPIF_CODE_ERR( chdir(path) == -1, errno, - "chdir(%s)", path); + if (chdir(path) == -1) + { + if (errno == EACCES) goto ex; + STOPIF( errno, "chdir(%s)", path); + } /* Here we need the entries sorted by name. */ STOPIF( waa__dir_enum( ¤t, 0, 1), NULL); @@ -1338,7 +1418,7 @@ current.entry_count, old->entry_count, status); /* No entries means no new entries; but not old entries deleted! */ - if (current.entry_count == 0) goto ex; + if (current.entry_count == 0) goto after_compare; /* Now the directories get compared. 
@@ -1366,7 +1446,6 @@ { int status; int ignore; - struct estat tmp; STOPIF( ign__is_ignore(sts, &ignore), NULL); if (ignore>0) @@ -1381,18 +1460,20 @@ DEBUGP("found a new one!"); sts->entry_status=FS_NEW; + + /* Has to be done in that order, so that ac__dispatch() already finds + * sts->do_filter_allows set. */ + STOPIF( ops__set_todo_bits(sts), NULL); STOPIF( ac__dispatch(sts), NULL); + + ops__mark_parent_cc(sts, entry_status); approx_entry_count++; - STOPIF( ops__set_to_handle_bits(sts), NULL); /* if it's a directory, add all subentries, too. */ - /* Use the temporary variable to see whether child-entries are - * interesting to us. */ - tmp.parent=sts; - tmp.do_this_entry=tmp.do_tree=0; - STOPIF( ops__set_to_handle_bits(&tmp), NULL); - if (S_ISDIR(sts->st.mode) && tmp.do_this_entry) + if (S_ISDIR(sts->st.mode) && + ops__are_children_interesting(sts) && + (opt__get_int(OPT__FILTER) & FS_NEW)) { STOPIF_CODE_ERR( chdir(sts->name) == -1, errno, "chdir(%s)", sts->name); @@ -1436,18 +1517,19 @@ * like we're doing above in the by_name array. */ // IF_FREE(current.strings); - -ex: +after_compare: /* There's no doubt now. * The old entries have already been checked, and if there are new * we're sure that this directory has changed. */ old->entry_status &= ~FS_LIKELY; - /* If we find a new entry, we know that this directory has changed. */ + /* If we find a new entry, we know that this directory has changed. + * We cannot use the ops__mark_parent_* functions, as old can have no + * children that we could give. */ if (nr_new) - old->entry_status |= FS_CHANGED | FS_CHILD_CHANGED; - + ops__mark_changed_parentcc(old, entry_status); +ex: if (dir_hdl!=-1) { i=fchdir(dir_hdl); @@ -1722,27 +1804,110 @@ * before, so ->entry_count is defined as 0 (see ops__load_1entry()). * For replaced entries which are _now_ directories we'll always * get here, and waa__update_dir() will give us the children. */ - if (opt_recursive >=0 && - (sts->entry_status || - opt_checksum || + if ((sts->entry_status || + (opt__get_int(OPT__CHANGECHECK) & CHCHECK_DIRS) || (sts->flags & RF_ADD) || - (sts->flags & RF_CHECK) ) ) + (sts->flags & RF_CHECK) ) && + ops__are_children_interesting(sts) ) { - if (only_check_status) + if (only_check_status==1) DEBUGP("Only check & set status - no update_dir"); else { DEBUGP("dir_to_print | CHECK for %s", sts->name); STOPIF( waa__update_dir(sts), NULL); + + /* Now the status could have changed, and therefore the filter might + * now apply. */ + ops__calc_filter_bit(sts); } } -ex: /* Whether to do something with this directory or not shall not be * decided here. Just pass it on. */ - /* The path may not be valid here anymore. */ - STOPIF( ac__dispatch(sts), NULL); + if (ops__allowed_by_filter(sts)) + STOPIF( ac__dispatch(sts), NULL); +ex: + return status; +} + + +/** Does an update on the specified directory, and checks for completeness. + * We get here if all \b known children have been loaded, and have to look + * whether the subchildren are finished, too. + * */ +int waa___finish_directory(struct estat *sts) +{ + int status; + struct estat *walker; + + + status=0; + + walker=sts; + while (1) + { + DEBUGP("checking directory %s: %u unfini, %d of %d (%s)", + walker->name, + walker->unfinished, + walker->child_index, walker->entry_count, + st__status_string(walker)); + + if (walker->unfinished > 0) break; + + /* This (parent) might not be finished yet; but don't discard empty + * directories (should be only on first loop invocation - all other + * entries *have* at least a single child). 
*/ + if (walker->entry_count == 0) + BUG_ON(walker != sts); + else if (walker->child_index < walker->entry_count) + break; + + DEBUGP("walker=%s; status=%s", + walker->name, st__status_string_fromint(walker->entry_status)); + + if (!S_ISDIR(walker->updated_mode) || + (walker->entry_status & FS_REPLACED) == FS_REMOVED) + { + /* If + * - it got replaced by another type, or + * - the directory doesn't exist anymore, + * we have already printed it. */ + } + else if (!(opt__get_int(OPT__FILTER) & FS_NEW)) + { + /* If new entries are not wanted, we simply do the callback - if it + * matches the users' wishes. */ + if (ops__allowed_by_filter(walker)) + STOPIF( ac__dispatch(walker), NULL); + } + else + { + /* Check the parent for added entries. Deleted entries have already + * been found missing while running through the list. */ + STOPIF( waa___check_dir_for_update(walker), NULL); + } + + + /* This directory is done, tell the parent. */ + walker=walker->parent; + if (!walker) break; + + + DEBUGP("%s has a finished child, now %d unfinished", + walker->name, walker->unfinished); + + /* We must not decrement if we don't count them. */ + if (walker->unfinished) + walker->unfinished--; + } + + if (walker == sts->parent && walker) + DEBUGP("deferring parent %s/%s (%d unfinished)", + walker->name, sts->name, walker->unfinished); + +ex: return status; } @@ -1750,8 +1915,8 @@ /** -. * * On input we expect a tree of nodes starting with \a root; the entries - * that need updating have estat::do_tree set, and their children get set - * via ops__set_to_handle_bits(). + * that need updating have estat::do_userselected set, and their children + * get set via ops__set_todo_bits(). * * On output we have estat::entry_status set; and the current \ref * action->local_callback gets called. @@ -1781,17 +1946,22 @@ { int status; struct estat *sts; - char *fullpath; + mode_t old_mode; - if (! (root->do_tree || root->do_a_child) ) + if (! (root->do_userselected || root->do_child_wanted) ) { /* If neither is set, waa__partial_update() wasn't called, so * we start from the root. */ - root->do_tree=root->do_this_entry=1; + root->do_userselected = + root->do_this_entry = + root->do_filter_allows_done = + root->do_filter_allows = 1; DEBUGP("Full tree update"); } + /* TODO: allow non-remembering behaviour */ + action->keep_children=1; status=0; while (cur_block) @@ -1801,58 +1971,62 @@ DEBUGP("doing update for %s ... %d left in %p", sts->name, cur_block->count, cur_block); - /* For directories initialize the child counter. */ + /* For directories initialize the child counter. + * We don't know the current type yet! */ if (S_ISDIR(sts->st.mode)) - sts->child_index=0; + sts->child_index = sts->unfinished = 0; + + old_mode=sts->st.mode; + STOPIF( ops__update_filter_set_bits(sts), NULL); + + if (!(sts->do_this_entry || sts->do_child_wanted)) + goto next; + + + /* Now sts->updated_mode has been set. */ + if (sts->entry_status) + ops__mark_parent_cc(sts, entry_status); if (sts->parent) { - STOPIF( ops__set_to_handle_bits(sts), NULL); + if (S_ISDIR(sts->st.mode)) + sts->parent->unfinished++; - /* If the parent's status is removed (or replaced), that tells us - * - the parent was a directory - * - the parent is no longer a directory - * So there can be no children now. 
*/ if (sts->parent->entry_status & FS_REMOVED) - { - sts->entry_status=FS_REMOVED; goto next; - } } - if (!(sts->do_this_entry || sts->do_a_child)) - goto next; + if (sts->entry_status & FS_REMOVED) + { + if (sts->parent) + { + /* If this entry is removed, the parent has changed. */ + sts->parent->entry_status &= (~FS_LIKELY); + sts->parent->entry_status |= FS_CHANGED; + /* The FS_CHILD_CHANGED markings are already here. */ + } + /* If a directory is removed, we don't allocate the by_inode + * and by_name arrays, and it is set to no child-entries. */ + if (S_ISDIR(old_mode) && !action->keep_children) + sts->entry_count=0; - STOPIF( ops__build_path(&fullpath, sts), NULL); - if (sts->do_this_entry) - STOPIF( ops__update_single_entry(sts, fullpath), NULL); - - /* If this entry is removed, the parent has changed. */ - if ( (sts->entry_status & FS_REMOVED) && sts->parent) - sts->parent->entry_status = FS_CHANGED | - ( sts->parent->entry_status & (~FS_LIKELY) ); - - /* If a directory is removed, we don't allocate the by_inode - * and by_name arrays, and it is set to no child-entries. */ - if (S_ISDIR(sts->st.mode) && - (sts->entry_status & FS_REMOVED) && - !action->keep_children) - sts->entry_count=0; + if (S_ISDIR(sts->st.mode)) + sts->parent->unfinished--; + } - /* If this entry was exactly removed (not replaced), - * skip the next steps. - * The sub-entries will be found missing because the parent is removed. */ - if ((sts->entry_status & FS_REPLACED) == FS_REMOVED) - goto next; - if (S_ISDIR(sts->st.mode) && (sts->entry_status & FS_REPLACED)) + if (S_ISDIR(sts->updated_mode) && + (sts->entry_status & FS_REPLACED) == FS_REPLACED) { - /* This entry was replaced, ie. was another type before. - * So the shared members have wrong data - - * eg. entry_count, by_inode. We have to correct that here. - * That leads to an update_dir, which is exactly what we want. */ + /* This entry was replaced, ie. was another type before, and is a + * directory *now*. + * So the shared members have wrong data - eg. entry_count, by_inode. + * We have to correct that here. + * That leads to an waa__update_dir, which is exactly what we want. + * */ sts->entry_count=0; + sts->unfinished=0; sts->by_inode=sts->by_name=NULL; sts->strings=NULL; /* TODO: fill this members from the ignore list */ @@ -1860,32 +2034,29 @@ } - /* This is more or less the same as below, only for this entry and - * not its parent. */ - /* If this is a directory which had no children ... */ - if (S_ISDIR(sts->st.mode) && sts->entry_count==0) +next: + /* This is more or less the same as below, only for this entry and not + * its parent. */ + if (S_ISDIR(sts->updated_mode) && sts->entry_count==0) { - DEBUGP("doing empty directory %s", sts->name); + DEBUGP("doing empty directory %s %d", sts->name, sts->do_this_entry); /* Check this entry for added entries. There cannot be deleted * entries, as this directory had no entries before. */ - STOPIF( waa___check_dir_for_update(sts), NULL); - /* Mind: \a fullpath may not be valid anymore. */ + STOPIF( waa___finish_directory(sts), NULL); } -next: /* If this is a normal entry, we print it now. * Directories are shown after all child nodes have been checked. */ - if (sts->do_this_entry && - ( !S_ISDIR(sts->st.mode) || - (sts->entry_status & FS_REMOVED)) ) + if (!S_ISDIR(sts->updated_mode) && sts->do_this_entry) STOPIF( ac__dispatch(sts), NULL); - /* The parent must be done *after* the last child node ... 
at least that's - * what's documented above :-) */ - if (sts->parent && - !(sts->parent->entry_status & FS_REMOVED) ) + /* The parent must be done *after* the last child node ... at least + * that's what's documented above :-) */ + /* If there's a parent, and it's still here *or* we have to remember + * the children anyway ... */ + if (sts->parent && action->keep_children ) { sts->parent->child_index++; @@ -1897,10 +2068,15 @@ /* Check the parent for added entries. * Deleted entries have already been found missing while * running through the list. */ - STOPIF( waa___check_dir_for_update(sts->parent), NULL); + + STOPIF( waa___finish_directory(sts->parent), NULL); } else - DEBUGP("deferring parent %s/%s", sts->parent->name, sts->name); + DEBUGP("deferring parent %s/%s%s: %d of %d, %d unfini", + sts->parent->name, sts->name, + sts->parent->do_this_entry ? "" : " (no do_this_entry)", + sts->parent->child_index, sts->parent->entry_count, + sts->parent->unfinished); } @@ -2028,8 +2204,14 @@ * - If we find no base, we believe that we're at the root of the wc. * * The parameter must not be shown as "added" ("n...") - because it isn't. + * + * For the case that the WC root is \c "/", and we shall put a \c "./" in + * front of the normalized paths, we need an additional byte per argument, + * so that eg. \c "/etc" can be changed to \c "./etc" - see the PDS + * comments. * */ -int waa__find_common_base(int argc, char *args[], char **normalized[]) +int waa__find_common_base2(int argc, char *args[], char **normalized[], + int put_dotslash) { int status, i, j, longest_index; int len; @@ -2085,9 +2267,12 @@ * relative paths at the end. * We assume (yes, I know, "Silence of the lambs" :-) that all paths are of * the full length. + * + * Actually we'll put a NULL pointer in, too. * - * Actually we'll put a NULL pointer in, too. */ - len = argc * sizeof(char*) + sizeof(NULL) + len + len * argc; + * PDS! + * */ + len = argc * sizeof(char*) + sizeof(NULL) + len + len * argc + argc; DEBUGP("need %d bytes for %d args", len, argc); norm=malloc(len); /* IF(!norm)STOPIF_ENOMEM would be visually more appealing :-² */ @@ -2100,6 +2285,8 @@ * cases like "/a/wc" compared against "/a//wc". */ for(i=0; iarg) sts->arg= faked_arg0 ? "" : orig[i]; - /* This new entry is surely updated. - * But what about its parents? - * They're not in the blocks list (that we get as parameter), so - * they'd get wrong information. */ - - /* This is marked as full, parents as "look below". */ - sts->do_tree=sts->do_this_entry=1; - while (sts) - { - sts->do_a_child = 1; - sts->entry_status |= FS_CHILD_CHANGED; - if (sts->flags & RF_ADD) - { - /* If this entry was created by the O_CREAT flag, get some data. */ - // STOPIF( ops__update_single_entry(sts, NULL), NULL); - } + /* This entry is marked as full, parents as "look below". */ + sts->do_userselected = sts->do_this_entry = 1; + while ( (sts = sts->parent) ) + { + /* This new entry is surely updated. + * But what about its (new) parents? + * They're not in the blocks list (that we get as parameter), so + * they'd get wrong information on commit. */ + + if (sts->flags & RF_ISNEW) + STOPIF( ops__update_single_entry(sts, NULL), NULL); - sts=sts->parent; + sts->do_child_wanted = 1; } } @@ -2447,32 +2640,57 @@ } +/** Abbreviation function for tree recursion. 
*/ +inline int waa___recurse_tree(struct estat **list, action_t handler, + int (*me)(struct estat *, action_t )) +{ + struct estat *sts; + int status; + + status=0; + while ( (sts=*list) ) + { + if (sts->do_this_entry && ops__allowed_by_filter(sts)) + STOPIF( handler(sts), NULL); + + /* If the entry was removed, sts->updated_mode is 0, so we have to take + * a look at the old sts->st.mode to determine whether it was a + * directory. */ + /* The OPT__ALL_REMOVED check is duplicated from ac__dispatch, to avoid + * recursing needlessly. */ + if ((sts->do_child_wanted || sts->do_userselected) && + sts->entry_count && + (sts->updated_mode ? + S_ISDIR(sts->updated_mode) : + ((sts->entry_status & FS_REMOVED) && + S_ISDIR(sts->st.mode) && + opt__get_int(OPT__ALL_REMOVED)==OPT__YES)) ) + STOPIF( me(sts, handler), NULL); + list++; + } + +ex: + return status; +} + + /** -. * */ int waa__do_sorted_tree(struct estat *root, action_t handler) { int status; - struct estat **list, *sts; - status=0; + /* Do the root as first entry. */ + if (!root->parent && root->do_this_entry) + STOPIF( handler(root), NULL); + if ( !root->by_name) STOPIF( dir__sortbyname(root), NULL); - /* Alternatively we could do a - * for(i=0; ientry_count; root++) - * */ - list=root->by_name; - while ( (sts=*list) ) - { - if (sts->do_this_entry) - STOPIF( handler(sts), NULL); - - if (sts->do_tree && sts->entry_type==FT_DIR) - STOPIF( waa__do_sorted_tree(sts, handler), NULL); - list++; - } + STOPIF( waa___recurse_tree(root->by_name, handler, + waa__do_sorted_tree), NULL); ex: IF_FREE(root->by_name); @@ -2563,7 +2781,7 @@ * entries are left and must be allocated. */ /* We re-use the name string. */ space=0; - for( tmp=to_append, left=append_count; left>0; left--, tmp++) + for( tmp=to_append, left=append_count; left>0; left--, tmp++, space--) { if (space) newdata++; @@ -2589,3 +2807,83 @@ } +/** -. + * + * If \a base_dir is \c NULL, a default path is taken; else the string is + * copied and gets an arbitrary postfix. If \a base_dir ends in \c + * PATH_SEPARATOR, \c "fsvs" is inserted before the generated postfix. + * + * \a *output gets set to the generated filename, and must not be \c + * free()d. */ +int waa__get_tmp_name(const char *base_dir, + char **output, apr_file_t **handle, + apr_pool_t *pool) +{ + int status; + static struct cache_t *cache; + static struct cache_entry_t *tmp_cache=NULL; + static const char to_append[]=".XXXXXX"; + static const char to_prepend[]="fsvs"; + char *filename; + int len; + + + STOPIF( cch__new_cache(&cache, 12), NULL); + + len= base_dir ? strlen(base_dir) : 0; + if (!len) + { + if (!tmp_cache) + { + /* This function caches the value itself, but we'd have to store the + * length ourselves; furthermore, we get a copy every time - which + * fills the pool, whereas we could just use our cache. */ + STOPIF( apr_temp_dir_get(&base_dir, pool), + "Getting a temporary directory path"); + + len=strlen(base_dir); + /* We need an extra byte for the PATH_SEPARATOR, and a \0. */ + STOPIF( cch__entry_set( &tmp_cache, 0, base_dir, + len +1 +1, 0, NULL), NULL); + + tmp_cache->data[len++]=PATH_SEPARATOR; + tmp_cache->data[len]=0; + + /* We set tmp_cache->len, which would be inclusive the alignment space + * at end, to the *actual* length, because we need that on every + * invocation. + * That works because tmp_cache is never changed again. 
*/ + tmp_cache->len=len; + } + + len=tmp_cache->len; + base_dir=tmp_cache->data; + BUG_ON(base_dir[len] != 0); + } + + STOPIF( cch__add(cache, 0, base_dir, + /* Directory PATH_SEPARATOR pre post '\0' */ + len + 1 + strlen(to_prepend) + strlen(to_append) + 1 + 3, + &filename), NULL); + + if (base_dir[len-1] == PATH_SEPARATOR) + { + strcpy( filename + len, to_prepend); + len+=strlen(to_prepend); + } + + strcpy( filename + len, to_append); + /* The default values include APR_DELONCLOSE, which we only want if the + * caller is not interested in the name. */ + STOPIF( apr_file_mktemp(handle, filename, + APR_CREATE | APR_READ | APR_WRITE | APR_EXCL | + (output ? 0 : APR_DELONCLOSE), + pool), + "Cannot create a temporary file for \"%s\"", filename); + + if (output) *output=filename; + +ex: + return status; +} + diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/waa.h /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/waa.h --- fsvs-1.1.14/src/waa.h 2008-03-19 06:41:44.000000000 +0000 +++ fsvs-1.1.17/src/waa.h 2008-10-10 15:57:00.000000000 +0100 @@ -38,11 +38,11 @@ struct waa__entry_blocks_t { /** Pointer to packed struct \c estat array. */ struct estat *first; - /** Number of entries in array */ - int count; /** Pointers for linked list. @{ */ struct waa__entry_blocks_t *next, *prev; /** @} */ + /** Number of entries in array */ + int count; }; @@ -54,15 +54,14 @@ /** \defgroup waa_files Files used by fsvs * \ingroup compat * - * FSVS uses various files to store its configuration and informations + * \c FSVS uses various files to store its configuration and informations * about the system it is running on. * - * Two file trees are used: - * - \c /var/spool/fsvs (if not overridden by \ref WAA__PATH_ENV - * "$FSVS_WAA"). + * Two file trees are used:
+ * - \c /var/spool/fsvs (if not overridden by \ref o_waa "$FSVS_WAA"). * The WAA stores volatile data that should not be backed up; the files * have only lower-case letters. - * - \c /etc/fsvs (or \ref CONF__PATH_ENV "$FSVS_CONF")
+ * - \c /etc/fsvs (or \ref o_conf "$FSVS_CONF") * This is used to store configuration data, eg. for the working copies. * The names of files stored here have the first letter in upper-case. * Having this data backed-up (eg. along with the rest of the filesystem) @@ -70,7 +69,12 @@ * The single exception are the \ref dir Files; these are, strictly seen, * per working copy, but are stored in the spool directory, as they are * reconstructed on restore and would only give conflicts with old - * versions. + * versions. \n + * Please note that it's entirely fine to keep this directory versioned + * via \c FSVS, to have the ignore patterns and URL list stored; and in + * fact that would happen automatically if you have \c /etc as working + * copy. + *
    * * Generally a path can be of (nearly) arbitrary length, and have every * character (except \c NUL [\c \\0]) in it. @@ -177,19 +181,32 @@ /* this should be optimized into a constant. * verified for gcc (debian 4.0.0-7ubuntu2) */ -#define WAA__MAX_EXT_LENGTH max( \ - max( max(strlen(WAA__DIR_EXT), strlen(WAA__PROP_EXT) ), \ - strlen(WAA__FILE_MD5s_EXT) ), \ - max(strlen(WAA__IGNORE_EXT), strlen(WAA__URLLIST_EXT) ) \ - ) - +#define WAA__MAX_EXT_LENGTH max( \ + max( \ + max(strlen(WAA__CONFLICT_EXT), \ + strlen(WAA__COPYFROM_EXT)), \ + strlen(WAA__IGNORE_EXT) ), \ + max( \ + max(max(strlen(WAA__DIR_EXT), \ + strlen(WAA__FILE_MD5s_EXT)), \ + max(strlen(WAA__PROP_EXT), \ + strlen(WAA__CONFLICT_EXT)) ), \ + max( \ + max(strlen(WAA__FILE_INODE_EXT), \ + strlen(WAA__DIR_INODE_EXT)), \ + max(strlen(WAA__FILE_NAME_EXT), \ + strlen(WAA__DIR_NAME_EXT)) ) ) ) /** Store the current working directory. */ int waa__save_cwd(char **where, int *len, int additional); /** Initialize WAA operations. */ int waa__init(void); + /** Create a directory; ignore \c EEXIST. */ +int waa__mkdir_mask(char *dir, int including_last, int mask); +/** Create a directory, ignore \c EEXIST, and use a default mask. */ int waa__mkdir(char *dir, int including_last); + /* Given an \a path and an \a extension, this function returns a * \a filehandle that was opened for this entry in the WAA with \a flags. */ int waa__open(char *path, @@ -240,8 +257,15 @@ /** Given a list of path arguments the \a base path and relative paths * are returned. */ -int waa__find_common_base(int argc, char *args[], - char ***normalized_paths); +int waa__find_common_base2(int argc, char *args[], + char ***normalized_paths, + int put_dotslash); +/** Wrapper for waa__find_common_base2. */ +static inline int waa__find_common_base(int argc, char *args[], + char **normalized[]) +{ + return waa__find_common_base2(argc, args, normalized, 0); +} /** Similar to \ref waa__find_common_base(), but allows only specification * of a WC root. */ int waa__find_base(struct estat *root, int *argc, char ***args); @@ -290,7 +314,8 @@ struct waa__entry_blocks_t *blocks); /** This function traverses the tree and calls the handler function - * for the marked entries. */ + * for the marked entries; directories before their children, and in order + * sorted by name. */ int waa__do_sorted_tree(struct estat *root, action_t handler); /** A wrapper around dir__enumerator(), ignoring entries below \c @@ -317,6 +342,12 @@ #define SET_REVNUM (-12) +/** Returns a distict name and filehandle. */ +int waa__get_tmp_name(const char *base_dir, + char **output, apr_file_t **handle, + apr_pool_t *pool); + + /** Our current WC base. */ extern char *wc_path; /** How much bytes the \ref wc_path has. */ diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/warnings.c /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/warnings.c --- fsvs-1.1.14/src/warnings.c 2008-02-20 09:13:46.000000000 +0000 +++ fsvs-1.1.17/src/warnings.c 2008-04-04 06:14:34.000000000 +0100 @@ -92,27 +92,6 @@ } -/** -. - * - * \todo Deprecate, as FSVS_WARNING is allowed? - * */ -int wa__init(void) -{ - int status; - char *warn; - - status=0; - warn=getenv(WARNINGS_ENV); - - if (warn) - STOPIF( wa__split_process(warn, PRIO_ENV), - "From environment variable %s", WARNINGS_ENV); - -ex: - return status; -} - - /** * -. * The given string is of the format \c warning=action. @@ -247,8 +226,7 @@ status=0; /* Flush all streams, so that this warnings occur *after* * every other status output. 
*/ - STOPIF_CODE_ERR( fflush(NULL) == EOF, errno, - "Could not flush output streams"); + STOPIF_CODE_EPIPE( fflush(NULL), NULL); flag=0; for(i=0; i<_WRN__LAST_INDEX; i++) diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/src/warnings.h /tmp/FWQOjxAJQ0/fsvs-1.1.17/src/warnings.h --- fsvs-1.1.14/src/warnings.h 2008-02-20 09:13:46.000000000 +0000 +++ fsvs-1.1.17/src/warnings.h 2008-04-06 10:43:00.000000000 +0100 @@ -113,9 +113,6 @@ } warning_e; -/** Initialize the warning subsystem from the environment - \ref - * WARNINGS_ENV. */ -int wa__init(void); /** Possibly print a warning. */ int wa__warn(warning_e index, int status, char *format, ...) __attribute__ ((format (printf, 3, 4) )); diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/003_change_type /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/003_change_type --- fsvs-1.1.14/tests/003_change_type 2008-03-25 06:22:05.000000000 +0000 +++ fsvs-1.1.17/tests/003_change_type 2008-10-25 12:14:33.000000000 +0100 @@ -18,7 +18,11 @@ } fi -test -d typechange && rm -r typechange +if test -d typechange +then + rm -r typechange +fi + mkdir typechange pushd typechange > /dev/null @@ -37,12 +41,13 @@ $BINq ci -m "inserted types" -o delay=yes > $logfile rev=`grep "revision " $logfile | tail -1 | cut -f2 -d" " | cut -f1 -d" "` +$INFO "initial checkin is r$rev" # now goto other wc and update pushd $WC2 > /dev/null $BINq up -$BINq st +$BINdflt st > $logfile $COMPARE_1_2 popd > /dev/null @@ -54,7 +59,7 @@ do echo file > $i-file $ONLY_ROOT cp -a /dev/zero $i-device - ln -s $i-1 $i-symlink + ln -s $i-file $i-symlink mkdir $i-dir echo sub > $i-dir/sub-entry mkdir $i-dir/sub @@ -63,12 +68,22 @@ popd > /dev/null $BINq ci -m "changed types" -o delay=yes > $logfile +if [[ `$BINdflt st -C -C | wc -l` -ne 0 ]] +then + $BINdflt st -C -C + $ERROR "Entries left out of commit" +fi +$INFO "typechange done, running update" + +$BINdflt up $WC2 > $logfile +$INFO "update done" $WC2_UP_ST_COMPARE $SUCCESS "all types changed to other types." -exit -# Now we change WC1 back via update, and revert everything in WC2. +export FSVS_WARNING="mixed-rev-wc=ignore" + +# Now we change WC1 by using revert, and WC2 via update. $BINq up -r$rev $WC2 $BINq revert -r$rev -R -R . 
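A recurring change in the hunks above is that direct stdio error checks such as "status= EOF == fputc(...)" and the "fflush(NULL) == EOF" test are replaced by the EPIPE-tolerant STOPIF_CODE_EPIPE() wrapper, so that output piped into "head" or "true" no longer ends in an error. The macro's definition is not part of this diff; the following standalone C sketch only illustrates the assumed idea, and the helper name is made up for the example:

    /* Sketch only - not the real fsvs macro. Treat a failed stdio call as
     * an error, except when it failed with EPIPE: then the reader of our
     * output (e.g. "head -1") has simply gone away, and we stop quietly. */
    #include <errno.h>
    #include <stdio.h>

    static int example_stdio_status(int stdio_result)
    {
        /* fputc() and fputs() return EOF on error, fprintf() a negative
         * value; every successful call returns a non-negative value. */
        if (stdio_result >= 0)
            return 0;

        if (errno == EPIPE)
            return 0;               /* Broken pipe: quit silently. */

        return errno ? errno : EIO; /* Everything else is a real error. */
    }

The tests/018_various hunk further down, which pipes "$BINdflt log" into "true" and expects exit code 0 with no stderr output, exercises exactly this behaviour.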
diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/004_delete /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/004_delete --- fsvs-1.1.14/tests/004_delete 2006-10-12 06:20:54.000000000 +0100 +++ fsvs-1.1.17/tests/004_delete 2008-06-05 07:31:49.000000000 +0100 @@ -9,8 +9,8 @@ rm -r tree/c echo " ci" -$BIN ci -m "deleted file, dir and symlink" +$BINq ci -m "deleted file, dir and symlink" echo " st1" -$BIN st +$BINq st $WC2_UP_ST_COMPARE diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/005_device_ops__uid0 /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/005_device_ops__uid0 --- fsvs-1.1.14/tests/005_device_ops__uid0 2007-05-21 07:42:59.000000000 +0100 +++ fsvs-1.1.17/tests/005_device_ops__uid0 2008-06-05 07:31:49.000000000 +0100 @@ -15,9 +15,9 @@ touch reclink echo " ci" - $BIN ci -m "changed dev to dir/dev and link to file" + $BINq ci -m "changed dev to dir/dev and link to file" echo " st1" - $BIN st + $BINq st $WC2_UP_ST_COMPARE else diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/006_move_entries /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/006_move_entries --- fsvs-1.1.14/tests/006_move_entries 2006-10-12 06:20:54.000000000 +0100 +++ fsvs-1.1.17/tests/006_move_entries 2008-06-05 07:31:49.000000000 +0100 @@ -9,8 +9,8 @@ mv tree/b tree_b echo " ci" -$BIN ci -m "renamed two directories" +$BINq ci -m "renamed two directories" echo " st1" -$BIN st +$BINq st $WC2_UP_ST_COMPARE diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/007_update_changed /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/007_update_changed --- fsvs-1.1.14/tests/007_update_changed 2007-05-07 13:32:21.000000000 +0100 +++ fsvs-1.1.17/tests/007_update_changed 2008-07-18 06:11:35.000000000 +0100 @@ -11,20 +11,20 @@ echo "A testline #2" >> $filename echo " ci1" -$BIN ci -m "new file" +$BINq ci -m "new file" orig=`md5sum $filename` $WC2_UP_ST_COMPARE echo "A further line" >> $filename echo " ci2" -$BIN ci -m "new file" +$BINq ci -m "new file" pushd $WC2 > /dev/null echo "A newly changed line" >> $filename echo " up1" -if $BIN up 2> /dev/null +if $BINq up 2> /dev/null then $ERROR "The modified file was overwritten!" else @@ -33,7 +33,7 @@ #$BINdflt -d revert $filename -if $BIN revert $filename +if $BINq revert $filename then $SUCCESS "The modified file was reverted." else @@ -50,13 +50,12 @@ $ERROR "Revert did not work!" fi -if [[ `$BINdflt st | grep -v "\.$"` == "" ]] +if [[ `$BINdflt st`x == "x" ]] then $SUCCESS "No status output after revert." else - $ERROR_NB "Some status change??" $BINdflt st - exit 1 + $ERROR "Some status change??" fi rm $filename diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/008_update_to_rev /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/008_update_to_rev --- fsvs-1.1.14/tests/008_update_to_rev 2007-05-29 09:12:38.000000000 +0100 +++ fsvs-1.1.17/tests/008_update_to_rev 2008-07-18 06:11:35.000000000 +0100 @@ -5,6 +5,8 @@ $INCLUDE_FUNCS cd $WC +logfile=$LOGDIR/008.log + filename=update-file.upd dir=2313 file2=$dir/garble @@ -23,11 +25,11 @@ if [[ -e $filename ]] then rm $filename - $BIN ci -m "delete the test-file" + $BINq ci -m "delete the test-file" fi # this next line has two tabulators - in grep and cut -rev=`$BIN up | grep "revision " | tail -1 | cut -f2 -d" " | cut -f1 -d"."` +rev=`$BINdflt up | grep "revision " | tail -1 | cut -f2 -d" " | cut -f1 -d"."` echo "now at rev. 
$rev" @@ -41,16 +43,16 @@ echo " ci1" VL -$BIN ci -m "new file" +$BINq ci -m "new file" echo "A further line" >> $filename echo $file2 >> $file2 echo " ci2" VL -$BIN ci -m "new file" +$BINq ci -m "new file" VL -$BIN up -r$rev +$BINq up -r$rev VL if [[ -e $filename ]] then @@ -64,10 +66,9 @@ $SUCCESS "remote-status says to-be-done (1)." else $ERROR " remote-status failed (1)!" - exit 1 fi -$BIN up -r`expr $rev + 1` +$BINq up -r`expr $rev + 1` VL if [[ `wc -l < $filename` -ne 2 ]] then @@ -83,7 +84,7 @@ $ERROR " remote-status failed (2)!" fi -$BIN up -r`expr $rev + 2` +$BINq up -r`expr $rev + 2` VL if [[ `wc -l < $filename` -ne 3 ]] then @@ -96,25 +97,25 @@ then $SUCCESS " remote-status to old revision says to-be-deleted." else - $ERROR " remote-status failed (3)!" - exit 1 + $ERROR " remote-status -r $rev failed (3)!" fi -if [[ `$BINdflt remote-status | grep $filename` == "" ]] +$BINdflt remote-status > $logfile +if grep $filename < $logfile then - $SUCCESS " remote-status says nothing." -else + cat $logfile $ERROR " remote-status failed (4)!" - exit 1 +else + $SUCCESS " remote-status says nothing." fi touch -t 200406271837 $filename -if [[ `$BINdflt remote-status | grep $filename` == "" ]] +$BINdflt remote-status > $logfile +if grep $filename < $logfile then - $SUCCESS " remote-status on touched file says nothing." -else $ERROR " remote-status failed (5)!" - exit 1 +else + $SUCCESS " remote-status on touched file says nothing." fi VL diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/009_bigger_files /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/009_bigger_files --- fsvs-1.1.14/tests/009_bigger_files 2007-09-26 06:46:32.000000000 +0100 +++ fsvs-1.1.17/tests/009_bigger_files 2008-06-05 07:31:49.000000000 +0100 @@ -41,7 +41,7 @@ if [[ -e $filename ]] then rm $filename - $BIN ci -m "delete the test-file" + $BINq ci -m "delete the test-file" fi sparse_md5s=`$PATH2SPOOL $sparse md5s` @@ -56,7 +56,7 @@ seq 1 199999 > $filename dd if=/dev/null of=$sparse bs=1024 count=1 seek=4096 2> /dev/null echo " ci1" -$BIN ci -m "big files" +$BINq ci -m "big files" ci_md5=`$PATH2SPOOL $filename md5s` echo $ci_md5 CheckSyntax $filename $ci_md5 @@ -70,7 +70,7 @@ echo "Another line" >> $filename echo " ci2" -$BIN ci -m "big file 2" +$BINq ci -m "big file 2" CheckSyntax $filename $ci_md5 if [[ -e $ci_md5 ]] @@ -90,7 +90,8 @@ $SUCCESS "Update and commit give the same manber-hashes and MD5s" else $ERROR_NB "Update and commit give DIFFERENT manber-hashes and/or MD5s!!" - diff -uw $ci_md5 $up_md5 + ls -la $ci_md5 $up_md5 2> /dev/null + diff -uw $ci_md5 $up_md5 2> /dev/null $ERROR "Update and commit disagree" fi @@ -99,7 +100,7 @@ # now delete the file and test if the .../md5s is gone. 
rm $filename -$BIN ci -m "delete the big test-file" +$BINq ci -m "delete the big test-file" $WC2_UP_ST_COMPARE diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/010_non-existing_uids /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/010_non-existing_uids --- fsvs-1.1.14/tests/010_non-existing_uids 2007-12-05 07:04:02.000000000 +0000 +++ fsvs-1.1.17/tests/010_non-existing_uids 2008-06-05 07:31:49.000000000 +0100 @@ -27,16 +27,16 @@ if [[ -e $filename ]] then rm $filename - $BIN ci -m "delete the test-file" + $BINq ci -m "delete the test-file" fi date > $filename chown $uid.$gid $filename echo " ci1" -$BIN ci -m "uid-gid-test" +$BINq ci -m "uid-gid-test" echo "Another line" >> $filename echo " ci2" -$BIN ci -m "uid-gid-expand" +$BINq ci -m "uid-gid-expand" $WC2_UP_ST_COMPARE diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/011_ignore /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/011_ignore --- fsvs-1.1.14/tests/011_ignore 2008-02-22 06:05:54.000000000 +0000 +++ fsvs-1.1.17/tests/011_ignore 2008-10-25 12:17:04.000000000 +0100 @@ -12,6 +12,9 @@ # - not ignoring a file (take-pattern) # - all of these in subdirectories +logfile=$LOGDIR/011.ignore +logfile_all=$logfile.all + export PREFIX=igntest export TAKE=TAKE @@ -71,29 +74,29 @@ done -all_new=`$BINdflt st | grep $POSTFIX | wc -l` -take_new=`$BINdflt st | grep $POSTFIX | grep $TAKE | wc -l` -take_new_list=`$BINdflt st | grep $POSTFIX | grep $TAKE` +$BINdflt st | grep $POSTFIX > $logfile_all +all_new=`wc -l < $logfile_all` +take_new=`grep $TAKE < $logfile_all | wc -l` echo $all_new new files, $take_new to take. PATTERN_COUNT=15 # We need the patterns in the order take, ignore. # Test the --prepend here, too. -$BIN ignore "./**$PREFIX-s**" "PCRE:.*$PREFIX-p\." -$BIN ignore prepend "t./**$PREFIX-s.$TAKE**" "tPCRE:.*$PREFIX-p\.$TAKE" -$BIN ignore at=1 "ti./*/dir/*/W?LD/**/$PREFIX-s.$TAKE**" -$BIN ignore at=1 "ti./**/$PREFIX-[ef].$TAKE**" -$BIN ignore "t/**$PREFIX-S.$TAKE**" "/**$PREFIX-S**" -$BIN ignore "t$WC/**$PREFIX-A.$TAKE**" "$WC/**$PREFIX-A**" -$BIN ignore "i./**/$PREFIX-[ef]**" -$BIN ignore prepend "DEVICE:<0" "DEVICE:>=0xff:0xff" +$BINq ignore "./**$PREFIX-s**" "PCRE:.*$PREFIX-p\." +$BINq ignore prepend "t./**$PREFIX-s.$TAKE**" "tPCRE:.*$PREFIX-p\.$TAKE" +$BINq ignore at=1 "ti./*/dir/*/W?LD/**/$PREFIX-s.$TAKE**" +$BINq ignore at=1 "ti./**/$PREFIX-[ef].$TAKE**" +$BINq ignore "t/**$PREFIX-S.$TAKE**" "/**$PREFIX-S**" +$BINq ignore "t$WC/**$PREFIX-A.$TAKE**" "$WC/**$PREFIX-A**" +$BINq ignore "i./**/$PREFIX-[ef]**" +$BINq ignore prepend "DEVICE:<0" "DEVICE:>=0xff:0xff" ignored_file=$PREFIX-perinode-$POSTFIX touch $ignored_file -$BIN ignore prepend `perl -e '@f=stat(shift); $f[1] || die $!; printf "INODE:%d:%d:%d", $f[0] >> 8, $f[0] & 0xff, $f[1];' $ignored_file` +$BINq ignore prepend `perl -e '@f=stat(shift); $f[1] || die $!; printf "INODE:%d:%d:%d", $f[0] >> 8, $f[0] & 0xff, $f[1];' $ignored_file` # this should never match -$BIN ignore prepend "DEVICE:0xff:0xff" +$BINq ignore prepend "DEVICE:0xff:0xff" if [[ `$BINdflt ignore dump | wc -l` -eq $PATTERN_COUNT ]] then @@ -121,19 +124,19 @@ then $SUCCESS "'ignore dump | ignore load' gives identity" else - $ERROR_NB "ignore dump/load error" echo "**** Got: $after" echo "**** expected: $before" echo "**** Transfer said: $transfer" - exit 1 + $ERROR "ignore dump/load error" fi -filt_new=`$BINdflt st | grep $POSTFIX | wc -l` +$BINdflt st | grep $POSTFIX > $logfile +filt_new=`wc -l < $logfile` echo $filt_new after filtering. 
if [[ $filt_new -ne $take_new ]] then - $BINdflt st | grep $POSTFIX + cat $logfile $ERROR " mismatch - $filt_new got, $take_new expected!" fi @@ -197,9 +200,8 @@ then $SUCCESS "empty state." else - $ERROR_NB "not emptied:" cat $ign_file - exit 1 + $ERROR "not emptied" fi $BINq ignore prepend ./1 @@ -212,9 +214,8 @@ then $SUCCESS "pattern edit operations work." else - $ERROR_NB "dump gives wrong results:" cat $file - exit 1 + $ERROR "dump gives wrong results" fi rm $file @@ -236,15 +237,19 @@ fi +$BINq ci -mO + # Now we put the WAA in there and test again, to see whether it gets # correctly ignored. new=$WC/waa cp -a $FSVS_WAA $new FSVS_WAA=$new -if [[ `$BIN st -C` == "" ]] +# Only "." and "waa" may be shown - nothing below. +if [[ `$BINdflt st -C | wc -l` -le 2 ]] then $SUCCESS "WAA gets ignored" else + $BINdflt st -C $ERROR "WAA would get versioned" fi @@ -268,3 +273,24 @@ $SUCCESS "Absolute ignore pattern warnings tests" + +# Test the "dir-only" specification +true | $BINdflt ignore load +$BINq ci -m1 +# now we're clean. +mkdir -p deep/a/b/c/dir +touch deep/fileA +touch deep/a/fileB +touch deep/a/b/fileC +touch deep/a/b/c/fileD +touch deep/a/b/c/dir/file-ok +$BINq ignore 't./deep/**ok*' 'dt./deep/**' './deep/**' +$BINq ci -m1 +if [[ `$BINdflt log -v -r HEAD | grep file | wc -l` -eq 1 ]] +then + $SUCCESS "dir-only pattern looks ok." +else + $ERROR "wrong commit for dir-only pattern" +fi + + diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/012_export /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/012_export --- fsvs-1.1.14/tests/012_export 2007-08-02 14:49:48.000000000 +0100 +++ fsvs-1.1.17/tests/012_export 2008-06-05 07:31:15.000000000 +0100 @@ -20,7 +20,7 @@ mkdir $EXPDIR cd $EXPDIR -$BIN export $REPURL +$BINq export $REPURL if [[ -f $dir_path ]] then @@ -34,8 +34,8 @@ mkdir $EXPDIR cd $EXPDIR -$BIN export -r 3 $REPURL -$BIN up -r 3 $WC +$BINq export -r 3 $REPURL +$BINq up -r 3 $WC $COMPAREWITH $EXPDIR diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/013_manber /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/013_manber --- fsvs-1.1.14/tests/013_manber 2008-02-15 05:43:59.000000000 +0000 +++ fsvs-1.1.17/tests/013_manber 2008-08-13 06:26:31.000000000 +0100 @@ -31,6 +31,10 @@ timestamp=200602231527.00 +# The test uses the "fast" behaviour, ie. where showing "?" for a file is +# allowed. +echo change_check=none >> $FSVS_CONF/config + function L() { if [[ "$VERBOSE" != "" ]] @@ -55,13 +59,12 @@ then $SUCCESS " $OK ('$TS', with '$OPT')" else - $ERROR_NB " $NOK (expected '$TS', with '$OPT')" - $ERROR_NB " '"$*"'" if [[ "$VERBOSE" != "" ]] then $BINdflt st $OPT fi - exit 1 + $ERROR_NB " $NOK (expected '$TS', with '$OPT')" + $ERROR " '"$*"'" fi } @@ -71,7 +74,7 @@ L echo " ci1" - $BIN ci -m "big file" -o delay=yes + $BINq ci -m "big file" -o delay=yes # Data and mtime not changed T "......" 'Ok, not seen as changed (1)' 'Seen as changed? (1)' '-v' @@ -154,7 +157,6 @@ if [[ `diff -U0 $file1 $file2 | egrep "^[^ ]" | wc -l` -ne 5 ]] then $ERROR 'NOK, more than one line changed??' 
- exit 1 fi # ignore the 2nd file @@ -166,7 +168,7 @@ TestExtensively 0 rm $file1 -$BIN ci -m "del big file" +$BINq ci -m "del big file" # test with empty directory @@ -174,11 +176,11 @@ mkdir $subdir2 mkdir $file1 $file2 -$BIN ci -m "empty dir 1" +$BINq ci -m "empty dir 1" TestExtensively 1 -$BIN ci -m "empty dir 2" +$BINq ci -m "empty dir 2" # test with non-empty directory @@ -189,11 +191,11 @@ # make 2nd timestamp equal rsync $subdir1/ $subdir2/ -a -$BIN ci -m "dir 1" +$BINq ci -m "dir 1" TestExtensively 2 -$BIN ci -m "dir 2" +$BINq ci -m "dir 2" $WC2_UP_ST_COMPARE diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/014_basic_tests /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/014_basic_tests --- fsvs-1.1.14/tests/014_basic_tests 2008-02-15 05:43:59.000000000 +0000 +++ fsvs-1.1.17/tests/014_basic_tests 2008-07-18 06:11:35.000000000 +0100 @@ -12,38 +12,34 @@ do if [[ `$BINdflt st | wc -l` -ne 0 ]] then - $ERROR_NB "status gave unexpected lines:" $BINdflt st - exit 1 + $ERROR "status gave unexpected lines" fi touch "$filename" if [[ `$BINdflt st | wc -l` -ne 2 ]] then - $ERROR_NB "status gave unexpected lines; only 2 line expected," - echo ". changed, '$filename' as new" + $ERROR_NB "Expect: . changed, '$filename' as new" $BINdflt st - exit 1 + $ERROR "got something else." fi echo " ci" - $BINdflt ci -m "$filename-$RANDOM" -o delay=yes > $tmp + $BINdflt ci -m "$filename-$RANDOM" -o delay=yes . > $tmp if [[ `grep -F "N... 0 ./$filename" < $tmp > /dev/null` ]] then - $ERROR_NB "expected '$filename' as new" cat $tmp - exit 1 + $ERROR "expected '$filename' as new" else $SUCCESS "'$filename' is new" fi - if [[ `tail -1 $tmp | grep -v 'committed revision'` ]] + if [[ `tail -1 $tmp` == "committed revision"* ]] then - $ERROR_NB "expected 'committed revision'" - cat $tmp - exit 1 - else $SUCCESS "found revision line" + else + cat $tmp + $ERROR "expected 'committed revision'" fi $WC2_UP_ST_COMPARE @@ -59,10 +55,9 @@ if [[ `$BINdflt st | wc -l` -ne 2 ]] then - $ERROR_NB "status gave unexpected lines; only 2 line expected," - echo ". changed, $filename as deleted" + echo "Expect: . changed, $filename as deleted" $BINdflt st - exit 1 + $ERROR "Doesn't match." fi @@ -73,23 +68,21 @@ echo " ci" -$BINdflt ci -m delete > $tmp +$BINdflt ci -m delete . > $tmp if [[ `grep -F "D... 0 ./$filename" < $tmp > /dev/null` ]] then - $ERROR_NB "expected $filename as deleted" cat $tmp - exit 1 + $ERROR "expected $filename as deleted" else $SUCCESS "$filename is deleted" fi -if [[ `tail -1 $tmp | grep -v 'committed revision'` ]] +if [[ `tail -1 $tmp` == "committed revision"* ]] then - $ERROR_NB "expected 'committed revision'" - cat $tmp - exit 1 -else $SUCCESS "found revision line" +else + cat $tmp + $ERROR "expected 'committed revision'" fi diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/015_sync_repos /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/015_sync_repos --- fsvs-1.1.14/tests/015_sync_repos 2008-03-19 06:42:51.000000000 +0000 +++ fsvs-1.1.17/tests/015_sync_repos 2008-06-18 11:38:40.000000000 +0100 @@ -11,6 +11,7 @@ TMP_UP0=$LOGDIR/015.sync-repos-test.up0.tmp TMP_SYNC=$LOGDIR/015.sync-repos-test.sync.tmp TMP_WC=$LOGDIR/015.sync-repos-test.wc.tmp +logfile=$LOGDIR/015.log dir_path=`$PATH2SPOOL $WC2 dir` @@ -19,6 +20,7 @@ REV=HEAD + # Get the dir-files. ################################ @@ -27,10 +29,9 @@ function copy_dir { -# TODO - do fsvs:md5 or suchlike for encoded files! # Quick fix: the files $sync and $up won't be identical, as the RF_CHECK # flag will be set for some entries. 
- perl -e 'print scalar(<>); while (<>) { @a=split(/(\s+)/); $a[6] &= ~4; print join("",@a) unless $a[-1] eq "enc-dec\0"; }' + perl -e 'print scalar(<>); while (<>) { @a=split(/(\s+)/); $a[6] &= ~4; } ' } echo "step 1: update from empty set." @@ -60,8 +61,43 @@ echo "step 4: sync" # Do the sync at last, so that a correct list is left # for other tests. -rm $dir_path +# Test whether a copy database gets removed. +# We need an entries file with correct revision numbers, so we do an +# initial sync-repos before the copy. +$BINq sync-repos -r $REV +# We copy on an non-existing target; if we'd create that directory, the +# status check below would fail. +$BINq cp tree/a ggg $BINq sync-repos -r $REV + +$BINdflt st > $logfile +if [[ `wc -l < $logfile ` -eq 0 ]] +then + $SUCCESS "no status output - 1" +else + cat $logfile + $ERROR "status prints something - 1" +fi + +if [[ `$BINdflt cp dump | wc -l` -eq 1 ]] +then + $SUCCESS "copy db gets removed on sync-repos" +else + $ERROR "copy db not removed on sync-repos?" +fi + +# The "cp dump" iterates through the hash itself; look for the copy +# markings in the status report, too. +if $BINdflt st -v | grep -F ".....+" > $logfile +then + cat $logfile + $ERROR "Still copy flags set" +else +# If grep returns an error, no lines were found. + $SUCCESS "No copy flags set" +fi + + # As the RF_CHECK flag is set, we need to normalize: # - Header is taken unchanged # - Keep the whitespace separator, to get the line as identical as possible. @@ -91,14 +127,13 @@ # test if status and update work # root modified is allowed. -if [[ `$BINdflt st | wc -l` -le 1 ]] +$BINdflt st > $logfile +if [[ `grep -v ' \.$' $logfile | wc -l` -eq 0 ]] then - $SUCCESS "no status output" + $SUCCESS "no status output - 2" else - $BINdflt st -d > $LOGDIR/015.status - $ERROR_NB "status prints something:" - $BINdflt st - exit 1 + cat $logfile + $ERROR "status prints something - 2" fi # Update to HEAD @@ -122,23 +157,23 @@ # We'd like to fake some device entry; but that's not easily possible, as # there's no "svn propset URL" currently. -$BINq up -if [[ `$BINdflt st | wc -l` -eq 0 ]] +$BINq up > $logfile +$BINdflt st > $logfile +if [[ `wc -l < $logfile` -eq 0 ]] then $SUCCESS "No status output after meta-data less update" else - $BINdflt st -d > $LOGDIR/015.status + cat $logfile $ERROR "Status output for meta-data-less entries unexpected" - $BINdflt st - exit 1 fi # Symlinks have lrwxrwxrwx, all other entries should be not writeable for # group/others. -if [[ `find no-meta -not -type l -printf "%U %m\n" | grep -v "^$UID [67]00\$"` -eq 0 ]] +find no-meta -not -type l -printf "%U %m\n" | grep -v "^$UID [67]00\$" > $logfile || true +if [[ `wc -l < $logfile` -eq 0 ]] then $SUCCESS "Owner and mode correctly set." else - $ERROR "Wrong rights set" + $ERROR "Wrong rights set - expected go-rwx." fi diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/016_add_unversion /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/016_add_unversion --- fsvs-1.1.14/tests/016_add_unversion 2008-02-15 05:43:59.000000000 +0000 +++ fsvs-1.1.17/tests/016_add_unversion 2008-10-25 12:14:57.000000000 +0100 @@ -25,13 +25,12 @@ $BINq ignore './**' # Maybe we get a ., possibly a m ... # Depends on timing of directory/inode changes (eg. same/different second) -if [[ `$BINdflt st | egrep -v ' .$'` == "" ]] +if [[ `$BINdflt st | wc -l` -le 1 ]] then $SUCCESS "all ignored" else - $ERROR_NB "Not all entries ignored?" $BINdflt st - exit 1 + $ERROR "Not all entries ignored?" fi @@ -56,10 +55,12 @@ # The directory is marked as added, too. 
-if [[ `$BINdflt st | egrep "^n[m.]\.\." | wc -l` -eq 3 ]] +$BINdflt st | egrep "^n[m.]\.\." > $logfile +if [[ `wc -l < $logfile` -eq 3 ]] then $SUCCESS "3 added" else + cat $logfile $ERROR "not added?" fi @@ -68,9 +69,8 @@ then $SUCCESS "2 committed" else - $ERROR_NB "adds not committed!" cat $logfile - exit 1 + $ERROR "adds not committed!" fi if [[ `svn ls $REPURL/$DIR | grep $PRE | wc -l` -eq 2 ]] @@ -125,8 +125,7 @@ if svn ls $REPURL/$file2 > $logfile 2>&1 then - $ERROR_NB "file still exists in the repository!" - exit 1 + $ERROR "file still exists in the repository!" else $SUCCESS "file was removed from repository." fi @@ -189,3 +188,22 @@ # We don't bother with them here. The next test should reinstate # the wc as needed. +$BINq ci -m2 +mkdir lolcat +$BINdflt add lolcat +rmdir lolcat +if [[ `$BINdflt st | grep lolcat | wc -l` -eq 1 ]] +then + $SUCCESS "added directory shown after rmdir." +else + $ERROR "added directory not shown after rmdir." +fi + +$BINdflt info lolcat | wc -l +if [[ `$BINdflt info lolcat | wc -l` -eq 16 ]] +then + $SUCCESS "added directory info ok." +else + $ERROR "added directory after rmdir has wrong info output." +fi + diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/017_locale_iconv /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/017_locale_iconv --- fsvs-1.1.14/tests/017_locale_iconv 2008-01-28 07:14:37.000000000 +0000 +++ fsvs-1.1.17/tests/017_locale_iconv 2008-06-05 07:31:49.000000000 +0100 @@ -35,7 +35,7 @@ # TODO: test whether the entries are correct in the other locale. rm * - $BIN ci -m "locale ci $filename cleanup" + $BINq ci -m "locale ci $filename cleanup" $WC2_UP_ST_COMPARE } diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/018_various /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/018_various --- fsvs-1.1.14/tests/018_various 2008-03-13 07:57:22.000000000 +0000 +++ fsvs-1.1.17/tests/018_various 2008-07-18 06:11:35.000000000 +0100 @@ -16,17 +16,33 @@ logfile=$LOGDIR/018.log -# Exercise the commit msg editor code +# Exercise the commit msg editor code. Try an empty file, too. +echo $RANDOM > $file +EDITOR="touch" $BINdflt ci echo $msg > $file -EDITOR="cp $file" $BINdflt ci +EDITOR="cp $file" $BINdflt ci -o author=NoSuchMan -if svn log $REPURL -rHEAD | grep $msg > $logfile 2>&1 +svn log $REPURL -rHEAD > $logfile +if grep $msg < $logfile > /dev/null then - $SUCCESS "message was taken" + $SUCCESS "Message was taken" +else + $ERROR "Message not fetched from editor!" +fi + +if [[ "$PROTOCOL" != "file://" ]] +then + $WARN "Author only taken for file://; doesn't work for svn+ssh." else - $ERROR "message not fetched from editor!" + if grep NoSuchMan < $logfile > /dev/null + then + $SUCCESS "Author was taken" + else + $ERROR "Author not used on commit" + fi fi + $BINdflt log -rHEAD > $logfile 2>&1 if grep $msg < $logfile > /dev/null then @@ -92,27 +108,43 @@ > $file $BINdflt ci -F $file +# Test limit parameter +$BINdflt log -rHEAD:1 -o limit=1 $file > $logfile +if [[ `wc -l < $logfile` -eq 5 ]] +then + $SUCCESS "log limit obeyed" +else + cat $logfile + wc -l < $logfile + $ERROR "log limit doesn't work" +fi + # Test EPIPE handling. -# I could reproduce it with "strace -o /dev/null $BINdflt log | head -1", -# but only in 1 of 10 cases without the strace. -# Any error code $BINdflt would give is overwritten by "head"; so we use a -# perl loop here. +# I could reproduce it with "strace -o /dev/null $BINdflt log | true", (or +# "| head -1"), but only in 1 of 10 cases without the strace. 
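# The pipefail capture used in the rewritten EPIPE loop below can be tried on
# its own; only a sketch, with "seq" standing in for any command that writes
# more than a pipe buffer full of output and does not handle EPIPE itself:
ret=$(
	set -o pipefail
	seq 1 100000 | true	# the reader exits at once, the writer gets SIGPIPE
	echo $?
)
echo "writer status: $ret"	# 141 (128+SIGPIPE); a program handling EPIPE cleanly returns 0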
strace_bin=`which strace || true` strace_cmd=${strace_bin:+$strace_bin -o /dev/null} -cmd="$strace_cmd $BINdflt log" -if perl -e '$cmd=shift(); for(1 .. 10) { open(F, $cmd . " |") || die $!; close(F) || die $!; }' "$cmd" > $logfile 2>&1 -then - # No errors on STDOUT allowed. - if [[ `wc -l < $logfile` -eq 0 ]] +for command in log st +do + ret=$( + set -o pipefail + $strace_cmd $BINdflt $command 2>$logfile | true + echo $? + set +o pipefail ) + if [[ $ret -eq 0 ]] then - $SUCCESS "EPIPE handled correctly" + # No errors on STDOUT allowed. + if [[ `wc -l < $logfile` -eq 0 ]] + then + $SUCCESS "EPIPE on $command handled correctly" + else + $ERROR "wrong number of output lines on EPIPE $command test" + fi else - $ERROR "wrong number of output lines on EPIPE test" + $ERROR "Error code on EPIPE $command" fi -else - $ERROR "Error code on EPIPE" -fi +done # Test whether / at the end of an URL are removed @@ -163,6 +195,31 @@ fi +# Look whether there's a human-readable message (1 line) for non-existing +# CONF or WAA paths. +# Has to include the missing path, so that the generic "no working copy" +# error isn't allowed. +function Check_Path +{ + var=FSVS_$1 + if $BINdflt status /bin > $logfile 2>&1 + then + cat $logfile + $ERROR "Invalid $var doesn't stop?" + else + if [[ `wc -l < $logfile` -eq 1 && + `grep -c "${!var}" < $logfile` -eq 1 ]] + then + $SUCCESS "$var checked" + else + cat $logfile + $ERROR "Wrong message on invalid $var." + fi + fi +} +FSVS_CONF=$WC/not-here Check_Path CONF +FSVS_WAA=$WC/not-here Check_Path WAA + # Define an empty configuration directory, and try to do a status (without # a wc file). if FSVS_CONF=$WC $BINdflt status -N -N /sbin /bin > $logfile 2>&1 @@ -256,8 +313,7 @@ #then # $SUCCESS "invalid locales can stop fsvs" #else -# $ERROR_NB "invalid locales don't give an error?" -# exit 1 +# $ERROR "invalid locales don't give an error?" #fi # diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/019_many_files /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/019_many_files --- fsvs-1.1.14/tests/019_many_files 2008-02-15 05:43:59.000000000 +0000 +++ fsvs-1.1.17/tests/019_many_files 2008-10-02 18:39:31.000000000 +0100 @@ -5,52 +5,106 @@ $INCLUDE_FUNCS cd $WC -COUNT1=20 -COUNT2=20 -COUNT3=20 -seq1=`seq 1 $COUNT1` -seq2=`seq 1 $COUNT2` -seq3=`seq 1 $COUNT3` +logfile=$LOGDIR/019.log + +# Start counting from here, so that the length of the names is equal +# (needed for sorting later). +START=11 +COUNT=20 # We have to escape ( ) and *, as the shell would try to interpret these. -exp_count=`expr $COUNT1 \* \( $COUNT2 \* \( $COUNT3 + 1 \) + 1 \) + 1` +exp_count=$(($COUNT * ( $COUNT * ( $COUNT + 1 ) + 1 ) + 1)) -echo -n "Generating $exp_count entries: " +echo "Generating $exp_count entries." -for lev1 in $seq1 -do - echo -n "." - mkdir $lev1 - cd $lev1 - for lev2 in $seq2 - do - mkdir $lev2 - ( cd $lev2 - for lev3 in $seq3 - do - echo $lev1-$lev2-$lev3 >> $lev3.txt - done & - ) - done - cd .. -done +# Previously this was done via the shell, and partly parallel - but was +# still much slower than perl. +perl -e ' +($start, $end)=@ARGV; +for $a ($start .. $end) { + mkdir($a) || die "$a: $!"; + for $b ($start .. $end) { + $d="$a/$b"; + mkdir($d) || die "$d: $!"; + for $c ($start .. $end) { + $i++; + $f="$d/$c"; + open(F, "> $f") || die "$f: $!"; + print F "$a-$b-$c\n", ("." x ($i % 269)), "\n"; + } + } +} +' $START $(($START+$COUNT-1)) -echo "!" echo "Looking for them." -found=`$BINdflt st | wc -l` +# Generating them is so fast that the directory might stay in the same +# second. 
+found=`$BINdflt st -C | wc -l` if [[ $found -eq $exp_count ]] then - $SUCCESS "fsvs found all $exp_count changed entries." + $SUCCESS "fsvs found all $exp_count changed entries." else - $ERROR_NB "fsvs found $found instead of $exp_count entries!" - exit 1 + $ERROR "fsvs found $found instead of $exp_count entries!" fi echo "Checkin ..." -$BIN ci -m many +$BINq ci -m many echo "Checkout ..." $WC2_UP_ST_COMPARE +# Do some swapping of entries, so that the names are unchanged, but the +# inode numbers are mixed. +# That's to see that such changes are detected and correctly handled. +function Swap +{ + find $1 | perl -e ' + @list=map { chomp; $_; } ; + srand(1975007003); + $last=$list[rand(@list)]; + $lfn="x"; + @l=($last); + rename($last, $lfn) || die "$last => $lfn: $!\n"; + for(2 .. shift()) + { + $cur=splice(@list, rand(@list), 1); + rename($cur, $last) || die "$cur => $last: $!\n"; + $last=$cur; + push @l, $last; + } + rename($lfn, $last) || die "$lfn => $last: $!\n"; +# Use two spaces, no tab, in picture line! + format STDOUT= + ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< ~~ + $l +. + $l=join(" ",@l); + write; + ' $2 + +# Now there must be N+1 swapped entries. +# We need the -C as the size might be the same. + $BINdflt st -C -C -f text > $logfile + if [[ `wc -l < $logfile` -eq $3 ]] + then + $SUCCESS "Swapping $2 entries with '$1' ok ($3 changed)" + else + cat $logfile + $ERROR_NB "Swapping $2 entries with '$1' wrong" + $ERROR "expected $3 changed, got "`wc -l $logfile` + fi + + $BINq ci -m x -o delay=yes + $WC2_UP_ST_COMPARE +} + +# Swap files only +Swap "$START -type f" 50 50 +# If we swap 10 directories with 20 entries each, we get 200 changed +# entries +Swap ". -maxdepth 2 -mindepth 2 -type d " 10 200 +# 20*20*3 == 1200 +Swap ". -maxdepth 1 -mindepth 1 -type d " 3 1200 + diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/020_partial_ci /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/020_partial_ci --- fsvs-1.1.14/tests/020_partial_ci 2008-02-15 05:43:59.000000000 +0000 +++ fsvs-1.1.17/tests/020_partial_ci 2008-10-25 12:17:23.000000000 +0100 @@ -15,10 +15,9 @@ file11=$dir1/1 file12=$dir1/2 -file111=$dir11/1 -file112=$dir11/2 file21=$dir2/1 file22=$dir2/2 +file41=$dir4/1 log=$LOGDIR/020.logfile @@ -36,7 +35,8 @@ [ -e $dir ] || continue for file in 1 2 do - echo "A testline $RANDOM $RANDOM" > $dir/$file + echo "A testline " > $dir/$file + dd if=/dev/zero bs=$RANDOM count=1 >> $dir/$file 2>/dev/null done done } @@ -77,9 +77,8 @@ then $SUCCESS "Ok, commit of a file beneath another new file works" else - $ERROR_NB "wrong data committed (1); only $file11 expected!" cat $log - exit 1 + $ERROR "wrong data committed (1); only $file11 expected!" fi $BINdflt ci -m "new file2" $dir1 -o delay=yes > $log @@ -87,9 +86,8 @@ then $SUCCESS "commit of a file beneath a committed file works" else - $ERROR_NB "wrong data committed (2); only $dir1 and $file12 expected!" cat $log - exit 1 + $ERROR "wrong data committed (2); only $dir1 and $file12 expected!" fi @@ -103,9 +101,8 @@ then $SUCCESS "commit of a changed file beneath another changed file works" else - $ERROR_NB "wrong data committed (3); only $file21 expected!" cat $log - exit 1 + $ERROR "wrong data committed (3); only $file21 expected!" 
fi if false @@ -119,8 +116,7 @@ $SUCCESS "directory still shows as changed" else $BINdflt st - $ERROR_NB "directory isn't seen as changed" - exit 1 + $ERROR "directory isn't seen as changed" fi fi @@ -131,9 +127,8 @@ then $SUCCESS "commit of a single directory works" else - $ERROR_NB "wrong data committed (4); only 2 lines expected!" cat $log - exit 1 + $ERROR "wrong data committed (4); only 2 lines expected!" fi @@ -146,9 +141,8 @@ then $SUCCESS "initial commit works" else - $ERROR_NB "wrong data committed (5); only 3 lines expected!" cat $log - exit 1 + $ERROR "wrong data committed (5); only 3 lines expected!" fi @@ -160,32 +154,113 @@ then $SUCCESS "complex initial commit works" else - $ERROR_NB "wrong data committed (6); only 2 lines expected!" cat $log - exit 1 + $ERROR "wrong data committed (6); only 2 lines expected!" fi -if false -then -## sync-repos and commit, currently always fails. -## I originally wanted to show another failure here, no "directory -## not found". In my tests, even a patched fsvs 1.0.15 committed -## expluded stuff on a partial commit after a sync-repos. -## In this environment it simply fails with an error message, I -## don't know why... - ChangeData -$BINdflt sync-repos > /dev/null +$BINdflt sync-repos -q -$BINdflt ci -m "changed files after sync-repos" $dir31 > $log -if [[ `grep ./ < $log | wc -l` -eq 2 ]] +$BINdflt ci -m "changed files after sync-repos" ./$dir31 > $log +if [[ `grep -F ./ < $log | wc -l` -eq 3 ]] then $SUCCESS "complex partial commit after sync-repos works" else - $ERROR_NB "wrong data committed (7); only 2 lines expected!" cat $log - exit 1 -fi + $ERROR "wrong data committed (7); expected 'committing to,dir,2files,revX'" fi + + +$BINq ci -m "known state" +$WC2_UP_ST_COMPARE + + +# Now change data, and look whether filtering works. +function TT +{ + expected=$1 + do_changes=$3 + other_changes=$4 + + if [[ "$do_changes" == "" ]] ; then ChangeData ; fi + + $BINdflt st $2 > $log + if [[ `grep -F ./ < $log | wc -l` -ne $expected ]] + then + cat $log + $ERROR "Filtered status for '$2' expected $expected entries." + fi + + $BINdflt ci -m "options are $2, expect $expected" $2 > $log + if [[ `grep -F ./ < $log | wc -l` -ne $expected ]] + then + cat $log + $ERROR "Filtered commit for '$2' expected $expected entries." + fi + + # We cannot easily compare with WC2 here, because there are many changed + # entries, and only a few get committed. + # But we can check if there are still changes here. + $BINdflt st $2 > $log + if [[ `wc -l < $log` -ne 0 ]] + then + cat $log + $ERROR "Still changes after 'commit $2'." + fi + + if [[ "$other_changes" != "" ]] + then + # Expect some changes. + $BINdflt st > $log + if [[ `wc -l < $log` -ne $other_changes ]] + then + $ERROR "Expected $other_changes changes after 'commit $2'" + fi + fi + + $SUCCESS "Filtered commit for '$2' successful." + $BINq delay +} + +# Simple: all data files changed, no new or deleted entries - so no +# directory modifications. +TT 10 "." + +# The directory is, depending on the time dependency, marked as changed, +# too. To get a definitive answer we'll manually touch it. +echo aaaa > dB/Newwww +touch -d"2008-1-1 4:5:6" dB +# expect 1 file, afterwards 1 timestamp +TT 1 "-f new ." 1 +rm dB/Newwww +# expect 1 file, afterwards 1 timestamp +TT 1 "-f deleted ." 1 + +# Other entries may not be recorded. 
+$INFO "Testing that other entries are not recorded" +echo > $file41 +echo aaaa > dB/Newwww2 +# We expect a single file to be committed; afterwards all other 9 data +# files are changed, and "dB" (mtime). +TT 1 "-f new ." "" 11 + +# Now commit all changed entries; keeps "dB" as mtime. +TT 10 "-f text ." 1 + +# Now sync ... - no changes anymore. +$BINq ci -m sync1 +$WC2_UP_ST_COMPARE + + +echo a > new1 +echo a > $dir31/new2 +rm $file41 +# We expect ".", "dD" and "dC/dA" to be "mtime" +TT 3 "-f new,deleted ." no-change-data 3 + +# Sync again ... +$BINq ci -m sync2 +$WC2_UP_ST_COMPARE + diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/021_multi_url_update /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/021_multi_url_update --- fsvs-1.1.14/tests/021_multi_url_update 2007-04-10 06:31:06.000000000 +0100 +++ fsvs-1.1.17/tests/021_multi_url_update 2008-07-18 06:11:00.000000000 +0100 @@ -8,9 +8,11 @@ CMP_WC=`expr $UP_WC + 1` set -e + $PREPARE_CLEAN WC_COUNT=$CMP_WC > /dev/null $INCLUDE_FUNCS +logfile=$LOGDIR/021.multiurl.log for i in `seq 1 $DATA_WCs` do @@ -19,7 +21,7 @@ svn mkdir $tu -m $i echo $tu | $BINq urls load mkdir dir-$i common - touch file-$i dir-$i/file-$i common/file-$i + touch bfile-$i dir-$i/dfile-$i common/cfile-$i echo "Overlay $i" > overlayed $BINq ci -m "ci$i" done @@ -34,22 +36,24 @@ for prio_has in `seq 1 $DATA_WCs` do $INFO "Going with prio_has=$prio_has" + # Construct the URL list and build the compare-directory - urls= parm=--delete echo "" | $BINq urls load for i in `seq 1 $DATA_WCs` do # rotate the highest-priority URL nr=`perl -e 'print 1+(shift()-1+shift()-1) % shift()' $prio_has $i $DATA_WCs` - $BINq urls P:$i,$REPURL/$nr + $BINq urls N:u$nr,P:$i,$REPURL/$nr - rsync -a $parm $WCBASE$nr/ $WCBASE$CMP_WC/ + # We need to give the checksum parameter, so that rsync isn't misled by + # the equal mtimes. + rsync -a $parm $WCBASE$nr/ $WCBASE$CMP_WC/ -c -c parm=--ignore-existing done - $BINq up > /dev/null - $COMPARE $WCBASE$UP_WC/ $WCBASE$CMP_WC/ + $BINdflt up > $logfile + $COMPARE $WCBASE$UP_WC/ $WCBASE$CMP_WC/ 0 ignore_dirmtime done $SUCCESS "Multi-url update test passed." diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/022_update_details /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/022_update_details --- fsvs-1.1.14/tests/022_update_details 2008-02-15 05:43:59.000000000 +0000 +++ fsvs-1.1.17/tests/022_update_details 2008-07-18 06:11:35.000000000 +0100 @@ -15,15 +15,15 @@ # We don't use backticks here - in case there's an error in fsvs, the shell # would not stop. -$BIN up > $logfile +$BINq up > $logfile # this next line has two tabulators - in grep and cut rev=`grep "revision " $logfile | tail -1 | cut -f2 -d" " | cut -f1 -d"."` echo "now at rev. $rev" touch empty-file -$BIN ci -m "new file" +$BINq ci -m "new file" # Goto old revision -$BIN up -r$rev -o delay=yes +$BINq up -r$rev -o delay=yes # Modify WC @@ -35,19 +35,18 @@ $BINdflt st | grep new > $logfile # Goto last revision -$BIN up +$BINq up # The status must not have changed! 
if $BINdflt st | grep new | cmp $logfile - then $SUCCESS "update keeps new files, status shows them" else - $ERROR_NB "update hides new files:" $ERROR_NB "old was:" cat $logfile $ERROR_NB "new is:" $BINdflt st - exit 1 + $ERROR "update hides new files" fi # Now commit, so that the new files are versioned diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/026_diff /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/026_diff --- fsvs-1.1.14/tests/026_diff 2008-02-15 05:43:59.000000000 +0000 +++ fsvs-1.1.17/tests/026_diff 2008-09-15 16:18:37.000000000 +0100 @@ -9,6 +9,7 @@ copy=copy-file log=$LOGDIR/026.diff-log + echo "line" > $file $BINq ci -m "repos-vers" -o delay=yes @@ -40,7 +41,7 @@ fi -$BINq revert $file +$BINq revert $file -odelay=yes if [[ `$BINdflt diff $file | wc -l` -eq 0 ]] then @@ -74,11 +75,13 @@ echo X > $copy -if [[ `$BINdflt diff -r HEAD $copy | wc -l` -eq 6 ]] +$BINdflt diff -r HEAD $copy > $log +if [[ `wc -l < $log` -eq 6 ]] then $SUCCESS "Diff for copied" else - $ERROR "Diff for copied wrong" + wc -l < $log + $ERROR "Diff for copied wrong - expected 6 lines" fi @@ -96,11 +99,122 @@ $SUCCESS "Diff for changed copy" else $BINdflt diff -v $copy - $ERROR "Diff for changed copy" + $ERROR "Diff for changed copy, expected mode change" fi # Try colordiff auto mode -$BINdflt diff -v $copy -o colordiff=auto > /dev/null +$BINdflt diff -v $copy -o colordiff="" > /dev/null + +# Try error handling +if $BINdflt diff $copy -o colordiff=mustneverexist_invalidbinary.$$.$RANDOM > $log 2>&1 +then + $ERROR "Doesn't error out for an invalid colordiff name?" +else + $SUCCESS "Reports bad names for colordiff" +fi + +# No temporary file may be left behind. +if ls $copy.* 2> /dev/null +then + $ERROR "Temporary file left behind." +fi + + +# True immediately exits +if $BINdflt diff $copy -o colordiff=true > /dev/null 2>&1 +then + $ERROR "Doesn't error out for a non-reading colordiff?" +else + $SUCCESS "Reports stopping colordiffs" +fi + +# No temporary file may be left behind. +if ls $copy.* 2> /dev/null +then + $ERROR "Temporary file left behind." +fi + + +# EPIPE? +if $BINdflt diff $copy -o colordiff=cat | true +then + $SUCCESS "Ignores EPIPE" +else + $ERROR "Doesn't handle EPIPE" +fi + +# No temporary file may be left behind. +if ls $copy.* 2> /dev/null +then + $ERROR "Temporary file left behind." +fi + + +# Test "diff -rX" against entries in subdirectories, and compare against +# "live" diff. +# The header lines (current version, timestamp, etc.) are different and +# made equal for comparision. +$BINq ci -m1 -odelay=yes > $log +rev=`grep "revision " $log | tail -1 | cut -f2 -d" " | cut -f1 -d" "` +fn=tree/b/2/file-x +equalizer="perl -pe s#($fn).*#filename_and_so_on#" +echo $RANDOM $$ > $fn +$BINdflt diff $fn | $equalizer > $log +$BINq ci -m1 +# echo aaa > $fn # for verification that the test mechanism works +if $BINdflt diff -r$rev $fn | $equalizer | diff -u - $log +then + $SUCCESS "diff -rX" +else + $ERROR "'diff -rX' gives a different answer" +fi + + + +# Test diff over special entries +ln -s old X +$BINq ci -m1 -odelay=yes > $log +rev1=`grep "revision " $log | tail -1 | cut -f2 -d" " | cut -f1 -d" "` +ln -sf new X +function testdiff +{ + $BINq diff "$@" > $log +# There are additional lines "no linefeed" and "special entry changed". 
+ if [[ `wc -l < $log` -ne 9 ]] + then + cat $log + $ERROR "'diff "$@"' line count wrong" + fi + + if grep -F -- '-link old' < $log && + grep -F -- '+link new' < $log + then + $SUCCESS "'diff "$@"' ok" + else + cat $log + $ERROR "'diff "$@"' output wrong" + fi +} +testdiff -r$rev1 +# Test whether other, non-wanted, entries are diffed. +testdiff X -r$rev1 + +$BINq ci -m1 -odelay=yes > $log +rev2=`grep "revision " $log | tail -1 | cut -f2 -d" " | cut -f1 -d" "` +testdiff -r$rev1:$rev2 +testdiff -r$rev1:$rev2 X + +# Test how much gets diffed on -rX:Y +date > $fn +$BINq ci -m1 > $log +rev3=`grep "revision " $log | tail -1 | cut -f2 -d" " | cut -f1 -d" "` +testdiff -r$rev1:$rev3 X + +# Test diff on removed entries +rm X +testdiff -r$rev1:$rev3 X +$BINq ci -m1 > $log +testdiff -r$rev1:$rev3 X diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/027_recursive /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/027_recursive --- fsvs-1.1.14/tests/027_recursive 2008-03-25 06:22:46.000000000 +0000 +++ fsvs-1.1.17/tests/027_recursive 2008-10-25 12:15:16.000000000 +0100 @@ -17,7 +17,7 @@ done # commit some of the files $BINdflt ci -m 1 -q -o delay=yes -( cd $WC2 && $BINq up ) +$WC2_UP_ST_COMPARE for n in new changed do @@ -36,10 +36,21 @@ { # Action, Parameter, Nr. of expected lines, Message msg="$4" + +# If we do a status, we'll have a logfile anyway. +# If it's a revert, it might be nice to see whe WC before. + if [[ "$1" == "status" ]] + then + rm $LOGFILE.status 2> /dev/null || true + else + $BINdflt status -C -C > $LOGFILE.status + fi + $BINdflt $1 $2 > $LOGFILE - if [[ `wc -l < $LOGFILE` -ne "$3" ]] + lns=`wc -l < $LOGFILE` + if [[ $lns -ne "$3" ]] then - $ERROR "$msg failed - wrong number of output lines." + $ERROR "$msg failed - wrong number of output lines (exp $3, got $lns)." fi shift 4 @@ -79,9 +90,22 @@ revert "1/2/3/4/5/d" 2 "Revert of directory deletion" /d 1 changed 0 same 0 new 0 revert "1/2/3/4/5/6-changed" 2 "Single revert" changed 1 same 0 new 0 status "1/2/3/4/5/6-changed -v" 1 "Status after revert" changed 1 same 0 new 0 "-F ....." 1 +status "1/2/3/4/5 -v" 6 "Status after revert" changed 1 same 1 new 1 /d 1 \ + "-F ......" 3 "-F ....C." 1 "-F .t...." 1 + echo $RANDOM > 1/2/3/4/5/6-changed revert "1/2/3/" 4 "Non-recursive revert" changed 1 same 0 new 0 ".m.?" 2 dir 2 -revert ". -R" 7 "Recursive revert" changed 2 same 0 new 0 + +status "1/2/3/4/5 -v" 6 "Status after revert" changed 1 same 1 new 1 /d 1 \ + "-F ......" 2 "-F ....C." 1 "-F .t..C." 1 "-F .t...." 1 + +# The two changed entries get reverted, and their directories the mtime +# reset. The directory 3 has already been done, so 1, 2 and 6 get reported +# as possibly changed. + +# WHY is 6-new not found??? + +revert ". -R" 6 "Recursive revert" changed 2 same 0 new 0 mC 2 "^\.m\." 3 status "." 6 "Status after revert" new 3 changed 0 same 0 status ". -v" 17 "Verbose status after revert" new 3 changed 3 same 3 "-F ......" 11 @@ -99,8 +123,8 @@ echo something-else > $f-same done -# Directories whose entries got changed get a new mtime. -revert ". -R -o delay=yes" 10 "Full revert 2" changed 3 same 3 /d 0 new 0 ./1/2 8 +# Directories whose entries got changed don't get a new mtime! +revert ". -R -o delay=yes" 7 "Full revert 2" changed 3 same 3 /d 0 new 0 ./1/2 6 # But that doesn't work for the root directory ... so we touch it, then we # know exactly what to test for. touch . 
diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/028_unittests /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/028_unittests --- fsvs-1.1.14/tests/028_unittests 2007-08-09 06:32:52.000000000 +0100 +++ fsvs-1.1.17/tests/028_unittests 2008-10-10 15:56:28.000000000 +0100 @@ -15,7 +15,9 @@ for path in `pwd`/empty-file tree/a/1/file-z tree/../tree/b/2/./file-y ././/tree/././c/3/.././//2/.././../../dir-with-perms/../tree/c/./3/file-x do - p=`$BINdflt -d -D hlp__pathcopy st $path | tee $log | grep finished | cut -f6 -d" "` + $BINdflt -d -D hlp__pathcopy st $path > $log + # There might be many paths build; take only the last (via tail). + p=`grep finished < $log | tail -1 | cut -f6 -d" "` rl=`readlink -f $path` if [[ "$rl" != "$p" ]] then diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/029_properties /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/029_properties --- fsvs-1.1.14/tests/029_properties 2007-11-12 10:49:29.000000000 +0000 +++ fsvs-1.1.17/tests/029_properties 2008-05-15 06:51:29.000000000 +0100 @@ -11,7 +11,9 @@ propvalue=foobarbaz $BINdflt ps "$propname" "$propvalue" "$file" -propvalread=`$BINdflt pg "$propname" "$file"` +$BINq ci -m1 +$WC2_UP_ST_COMPARE +propvalread=`$BINdflt pg "$propname" "$WC2/$file"` if [[ "$propvalread" == "$propvalue" ]] then $SUCCESS "Property successfully read back" @@ -28,7 +30,9 @@ # empty property $BINdflt ps "$propname" "" "$file" -if [[ `$BINdflt pl -v $file` == "$propname=" ]] +$BINq ci -m1 +$WC2_UP_ST_COMPARE +if [[ `$BINdflt pl -v $WC2/$file` == "$propname=" ]] then $SUCCESS "Property successfully emptied" else @@ -37,30 +41,31 @@ # delete property $BINdflt pd "$propname" "$file" -if [[ `$BINdflt pl $file` == "$file has no properties." ]] + +propvalread=`$BINdflt pg "$propname" "$file"` +if [[ "$propvalread" == "" ]] then - $SUCCESS "Property successfully removed" + $SUCCESS "Property deleted" else - $ERROR "Property not removed!" + $ERROR "Deleted property still there" fi - -# re-set. -$BINdflt ps "$propname" "$propvalue" "$file" - -$BINq ci -m "prop" - -cd $WC2 - -$BINq up -$INFO "Updated other working copy." +$BINq ci -m1 propvalread=`$BINdflt pg "$propname" "$file"` -if [[ "$propvalread" == "$propvalue" ]] +if [[ "$propvalread" == "" ]] then - $SUCCESS "Property successfully read back" + $SUCCESS "Property still deleted after commit" else - $ERROR "Property not read!" + $ERROR "Deleted property back after commit" +fi + +$WC2_UP_ST_COMPARE +if [[ `$BINdflt pl $WC2/$file` == "$file has no properties." ]] +then + $SUCCESS "Property successfully removed" +else + $ERROR "Property not removed!" fi diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/030_eperm_warn /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/030_eperm_warn --- fsvs-1.1.14/tests/030_eperm_warn 2007-07-23 08:18:08.000000000 +0100 +++ fsvs-1.1.17/tests/030_eperm_warn 2008-07-18 06:11:35.000000000 +0100 @@ -44,17 +44,15 @@ echo "Testing stopping" if $BINdflt -v up -W chown-eperm=stop > $logfile 2>&1 then - $ERROR_NB "not stopped!" cat $logfile - exit 1 + $ERROR "not stopped!" else if [[ `grep chown-eperm $logfile | wc -l` -eq 1 ]] then $SUCCESS "stopped." else - $ERROR_NB "stopped for wrong reason?" cat $logfile - exit 1 + $ERROR "stopped for wrong reason?" 
fi fi @@ -68,9 +66,8 @@ then $SUCCESS "warning given" else - $ERROR_NB "warning NOT given" cat $logfile - exit 1 + $ERROR "warning NOT given" fi diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/032_commit-pipe /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/032_commit-pipe --- fsvs-1.1.14/tests/032_commit-pipe 2007-09-10 05:45:55.000000000 +0100 +++ fsvs-1.1.17/tests/032_commit-pipe 2008-10-10 15:56:38.000000000 +0100 @@ -7,11 +7,12 @@ function cmpdiff { - perl -e '$f=shift; $t=shift; undef $/; $_=<>; exit 1 unless m#---\s+$f.*'"$1"'.*\n\+\+\+\s+$f.*'"$2"'.*\n\@\@ -1 \+1 \@\@\n-$t\n\+$t$t\n#;' $filename $text < $logfile + [[ `cat $logfile` == *"--- $filename"*"$1"*"+++ $filename"*"$2"*"@@ -1 +1 @@"*"-$text"*"+$text$text" ]] } logfile=$LOGDIR/032.commit-pipe +logfile2=$logfile.2 filename=abcdefg.ijk text=abcde.123 encoder="openssl enc -e -a" @@ -42,11 +43,18 @@ # Try update $WC2_UP_ST_COMPARE +# Make sure both arrived there +if [[ X`$BINdflt pl -v $WC2/$filename | sort` == X"fsvs:commit-pipe=$encoder"*"fsvs:update-pipe=$decoder" ]] +then + $SUCCESS "En- and decoder arrived in $WC2." +else + $ERROR "En- or decoder didn't arrive in $WC2." +fi # Try diff echo $text$text > $filename $BINdflt diff $filename > $logfile -if cmpdiff "Rev. $rev_base64" "Local\s+version" +if cmpdiff "Rev. $rev_base64" "Local version" then $SUCCESS "Diff works." else @@ -58,7 +66,7 @@ $BINq ps fsvs:update-pipe "gzip -d" $filename # compare $BINdflt diff $filename > $logfile -if cmpdiff "Rev. $rev_base64" "Local\s+version" +if cmpdiff "Rev. $rev_base64" "Local version" then $SUCCESS "Diff after changing the decoder works." else @@ -69,7 +77,11 @@ $BINdflt ci -m2 > $logfile rev=`grep "revision " $logfile | tail -1 | cut -f2 -d" " | cut -f1 -d" "` rev_gzip=$rev -$BINdflt diff -r $rev_base64:$rev_gzip $filename > $logfile +# diff -rx:y currently prints the full path - TODO + +# If we'd just pipe to perl we wouldn't stop on error. +$BINdflt diff -r $rev_base64:$rev_gzip $filename > $logfile2 +perl -pe 's('"$WC"'/*)()g' < $logfile2 > $logfile if cmpdiff "Rev. $rev_base64" "Rev. $rev_gzip" then $SUCCESS "Repos-repos-diff works." @@ -91,15 +103,14 @@ cp -a $filename $tmp perl -e 'open(F, "+< " . shift) || die $!; print F $$;' $filename touch -r $tmp $filename -# Should be seen when using two -C, and not without any -C. +# Should be seen when using checksums, and not without. # With a single -C it would be checksummed if it's likely to be changed - # which it is, because the ctime changed. -if [[ `$BINdflt st $filename | wc -l` != 0 || - `$BINdflt st $filename -C -C | wc -l` == 0 ]] +if [[ `$BINdflt st $filename -o change_check=none | wc -l` != 0 || + `$BINdflt st $filename -o change_check=allfiles | wc -l` == 0 ]] then - $BINdflt st $filename - $BINdflt st $filename -C - $BINdflt st $filename -C -C + $BINdflt st $filename -o change_check=none + $BINdflt st $filename -o change_check=allfiles $ERROR "File status wrong?" fi diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/033_many_symlinks /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/033_many_symlinks --- fsvs-1.1.14/tests/033_many_symlinks 1970-01-01 01:00:00.000000000 +0100 +++ fsvs-1.1.17/tests/033_many_symlinks 2008-10-10 15:58:32.000000000 +0100 @@ -0,0 +1,53 @@ +#!/bin/bash + +set -e +$PREPARE_CLEAN > /dev/null +$INCLUDE_FUNCS +cd $WC + +logfile=$LOGDIR/033.log + + +COUNT=1000 +DIR=subdir + +# We set some ulimit here, so that we know see whether RAM is eaten or not. +# 640k is enough for everyone! No, not here ... 
+limit=32768 +# On 64bit the libraries need much more (virtual) memory; so we don't limit +# that here. +ulimit -S -d $limit -m $limit -s $limit +ulimit -H -d $limit -m $limit -s $limit + +mkdir $DIR +$BINq ci -m1 + +$INFO "Creating symlinks" +perl -e ' +($nr, $dir)=@ARGV; +for(1 .. $nr) +{ + symlink("./././././../$dir/../$dir/../$dir", + sprintf("%s/%05d", $dir,$_) ) || die $!; +} ' $COUNT $DIR + +$INFO "Looking for them." +# Generating is so fast that the directory might stay in the same second. +found=`$BINdflt st -o change_check=dir | wc -l` +# The directory gets reported, too. +if [[ $found -eq `echo $COUNT + 1 | bc` ]] +then + $SUCCESS "fsvs found all $COUNT changed entries." +else + $ERROR "fsvs found $found instead of $COUNT entries!" +fi + + +$INFO "Commit ..." +$BINq ci -m many + +$INFO "Update ..." +$WC2_UP_ST_COMPARE + +$INFO "sync-repos" +$BINq sync-repos diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/034_status /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/034_status --- fsvs-1.1.14/tests/034_status 2008-04-02 06:23:42.000000000 +0100 +++ fsvs-1.1.17/tests/034_status 2008-10-10 15:58:44.000000000 +0100 @@ -1,9 +1,7 @@ #!/bin/bash set -e -$PREPARE_DEFAULT > /dev/null $INCLUDE_FUNCS -cd $WC logfile=$LOGDIR/034.status @@ -36,114 +34,94 @@ $SUCCESS "Trial run $1$2$3$4$5$6$7 ok." } +export FSVS_DIR_SORT +# Keep the dir_sort option at the default *after* the loop; that's +# necessary for tests afterwards. +for FSVS_DIR_SORT in yes no +do + $INFO "Using dir_sort=$FSVS_DIR_SORT." -# without any change? -FiltMTOGNDA 0 0 0 0 0 0 0 + $PREPARE_DEFAULT > /dev/null + cd $WC -# meta-data change. -touch -t 200101270007 $file -FiltMTOGNDA 1 0 0 0 0 0 1 + # without any change? + FiltMTOGNDA 0 0 0 0 0 0 0 -# set as known state. -$BINdflt ci -m 1 + # meta-data change. + touch -t 200101270007 $file + FiltMTOGNDA 1 0 0 0 0 0 1 + # set as known state. + $BINdflt ci -m 1 -# text change, meta-data same -echo aiikortv > $file -touch -t 200101270007 $file -FiltMTOGNDA 0 1 0 0 0 0 1 - - -# text and meta-data change -echo adehlnor > $file -touch -t 200210291240 $file - -FiltMTOGNDA 1 1 0 0 0 0 1 - - -# deleted -rm $file -FiltMTOGNDA 0 1 0 0 0 1 1 - -# replaced -mkdir $file -FiltMTOGNDA 1 1 0 0 1 1 1 - - -# Test with a removed directory -mkdir -p a/b/c/d a/b/c/e a/b/d a/h/u a/h/j -( cd a/h ; touch -d yesterday some files in dir ) -$BINq ci -m 2 -rmdir a/b/c/d a/b/c/e a/b/c a/h/u - -$BINdflt st -C -o filter=deleted > $logfile -if [[ `wc -l < $logfile` -ne 4 || - `grep -w dir < $logfile | wc -l` -ne 4 || - `grep a/ < $logfile | wc -l` -ne 4 ]] -then - cat $logfile - $ERROR "Status output wrong (deleted directories #1)" -fi - -# The parent directories are changed, and that gets counted, too. -$BINdflt st -C -o filter=text > $logfile -if [[ `wc -l < $logfile` -ne 6 || - `grep a/ < $logfile | wc -l` -ne 6 ]] -then - cat $logfile - $ERROR "Status output wrong (deleted directories #2)" -fi - -date > a/h/some -date > a/h/dir - -$BINdflt st -C > $logfile -if [[ `wc -l < $logfile` -ne 8 || - `grep a/ < $logfile | wc -l` -ne 8 ]] -then - cat $logfile - $ERROR "Status output wrong (deleted directories #3)" -fi + # text change, meta-data same + echo aiikortv > $file + touch -t 200101270007 $file + FiltMTOGNDA 0 1 0 0 0 0 1 + # text and meta-data change + echo adehlnor > $file + touch -t 200210291240 $file -$SUCCESS "Ok, filter works." + FiltMTOGNDA 1 1 0 0 0 0 1 + # deleted + rm $file + FiltMTOGNDA 0 1 0 0 0 1 1 -# set as known state. 
-$BINq ci -m 2 -o delay=yes -$INFO "Testing sorting" + # replaced + mkdir $file + FiltMTOGNDA 1 1 0 0 1 1 1 -# Try sorting. -funsort=$logfile.unsort -fsort=$logfile.sort -touch z a y b x c w -for parm in "" "-v" -do - # We do non-recursive here, because the subdirectories come unsorted. - $BINdflt st -N ? > $funsort - $BINdflt st -N ? -o dir_sort=yes > $fsort - if cmp -s $funsort $fsort + $INFO "Testing removed directories." + + mkdir -p a/b/c/d a/b/c/e a/b/d a/h/u a/h/j + ( cd a/h ; touch -d yesterday some files in dir ) + $BINq ci -m 2 + rmdir a/b/c/d a/b/c/e a/b/c a/h/u + + $BINdflt st -C -o filter=deleted > $logfile + if [[ `wc -l < $logfile` -ne 4 || + `grep -w dir < $logfile | wc -l` -ne 4 || + `grep a/ < $logfile | wc -l` -ne 4 ]] then - $WARN "Sorted equals unsorted?" + cat $logfile + $ERROR "Status output wrong (deleted directories #1)" fi - if sort -k3 $funsort | cmp -s - $fsort + # The parent directories are changed, and that gets counted, too. + $BINdflt st -C -o filter=text > $logfile + if [[ `wc -l < $logfile` -ne 6 || + `grep a/ < $logfile | wc -l` -ne 6 ]] then - echo "Sorting ok" - else - $ERROR "Didn't sort (cmdline='$parm')" + cat $logfile + $ERROR "Status output wrong (deleted directories #2)" + fi + + date > a/h/some + date > a/h/dir + + $BINdflt st -C > $logfile + if [[ `wc -l < $logfile` -ne 8 || + `grep a/ < $logfile | wc -l` -ne 8 ]] + then + cat $logfile + $ERROR "Status output wrong (deleted directories #3)" fi done -$SUCCESS "Sorting works." +$SUCCESS "Ok, filter works." -$BINq ci -m1 -o delay=yes +# set as known state. +$BINq ci -m 2 -o delay=yes + + +$INFO "Testing color output." -# Test color output. function HasEscape { # I cannot make grep and egrep understand \x1b. @@ -164,38 +142,3 @@ rm hazgr HasEscape "Deleted" - -# Check for -N on deleted hierarchies -# Set some known timestamp -touch -d "2008-02-01 12:13" . -$BINq ci -m1 -o delay=yes - -function ExpLines -{ - parms="$1" - exp_cnt="$2" - if [[ `$BINdflt st $parms | wc -l` -eq $exp_cnt ]] - then - $SUCCESS "found $exp_cnt for '$parms'" - else - $BINdflt st $parms - $ERROR "expected $exp_cnt, got "`$BINdflt st $parms | wc -l` - fi -} - - -ExpLines "-C" 0 -rm -r tree -touch -d "2008-02-01 12:13" . - -# With -N -N, no children are looked at. -ExpLines "-N -N -C" 0 -# If we don't pass -C, the timestamp is looked at, and if it's still the -# same no check is done. -ExpLines "-N -N" 0 -# We have to touch the directory; even with -C no children are seen. -touch . -ExpLines "-N -N -C" 1 -ExpLines "-N -N" 1 -ExpLines "-N" 2 -ExpLines "" 41 diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/035_sorted_output /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/035_sorted_output --- fsvs-1.1.14/tests/035_sorted_output 1970-01-01 01:00:00.000000000 +0100 +++ fsvs-1.1.17/tests/035_sorted_output 2008-10-10 15:58:44.000000000 +0100 @@ -0,0 +1,40 @@ +#!/bin/bash + +set -e +$INCLUDE_FUNCS +$PREPARE_DEFAULT > /dev/null +cd $WC + + +logfile=$LOGDIR/035.sorted_output + +funsort=$logfile.unsort +fsort=$logfile.sort + + +$INFO "Testing sorting" + +# We try to create entries with a lower inode number than their parent. +touch z a y b x c w +mkdir H +mv z a y b x c w H +for parm in "" "-v" +do + $BINdflt st > $funsort + $BINdflt st -o dir_sort=yes > $fsort + + if cmp -s $funsort $fsort + then + $WARN "Sorted equals unsorted?" + fi + + if sort -k3 $funsort | cmp -s - $fsort + then + echo "Sorting ok" + else + sort -k3 $funsort | diff -u - $fsort + $ERROR "Didn't sort (cmdline='$parm')" + fi +done +$SUCCESS "Sorting works." 
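# What the touch/mkdir/mv sequence above is after can be shown on its own;
# a throw-away sketch, the directory and file names are arbitrary:
cd "`mktemp -d`"
touch z a y b x c w	# the files get their inodes first ...
mkdir H && mv z a y b x c w H	# ... the later-created parent usually gets a higher number
ls -id H
ls -i H	# the children typically carry lower inode numbers than H itself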
+ diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/036_status_non-recursive /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/036_status_non-recursive --- fsvs-1.1.14/tests/036_status_non-recursive 1970-01-01 01:00:00.000000000 +0100 +++ fsvs-1.1.17/tests/036_status_non-recursive 2008-10-25 12:16:12.000000000 +0100 @@ -0,0 +1,120 @@ +#!/bin/bash + +set -e +$INCLUDE_FUNCS +$PREPARE_DEFAULT > /dev/null +cd $WC + +logfile=$LOGDIR/036.status_non-recursive + + +# Check for -N on deleted hierarchies +# Set some known timestamp +touch -d "2008-02-01 12:13" . +$BINq ci -m1 -o delay=yes + +function ExpLines +{ + parms="$1" + exp_cnt="$2" + $BINdflt st $parms > $logfile + if [[ `wc -l < $logfile` -eq $exp_cnt ]] + then + $SUCCESS "found $exp_cnt for '$parms'" + else + $ERROR_NB "Got for $BINdflt st $parms:" + cat $logfile + $ERROR "expected $exp_cnt lines." + fi +} + + +ExpLines "-C" 0 +rm -r tree +touch -d "2008-02-01 12:13" . + +# With -N -N, no children are looked at. +ExpLines "-N -N -C" 0 +# If we don't pass -C, the timestamp is looked at, and if it's still the +# same no check is done. +ExpLines "-N -N" 0 +# We have to touch the directory; even with -C no children are seen. +touch . +#strace $BINdflt -N -N -C big_file > $logfile +#false +ExpLines "-N -N -C" 1 +ExpLines "-N -N" 1 +ExpLines "-N" 2 +ExpLines "" 41 + +if $BINdflt -o stop_change=true status +then + $ERROR "Expected an error code - 1" +fi + +$BINq ci -m 1 +if $BINdflt -o stop_change=true status +then + $SUCCESS "No error code without change." +else + $ERROR "Expected no error code - 1" +fi + +touch empty-file +if $BINdflt -o stop_change=true status +then + $ERROR "Expected an error code - 2" +fi +if $BINdflt -o stop_change=true -f text status +then + $SUCCESS "Filtering for changes, stopping ok" +else + $ERROR "Expected no error code - 2" +fi + + +# Check change detection options; use a known timestamp, and create them +# simultaneously because of the ctime. +touch -d "2008-07-03 1:3" . big_file big_2 big_3 +$BINq ci -m 1 big_file + + +$INFO "1) change file, same mtime, incl. size." +echo bla > big_2 +touch -d "2008-07-03 1:3" big_2 +mv big_2 big_file +ExpLines "big_file" 1 +ExpLines "-o change_check=file_mtime big_file" 1 +ExpLines "-o change_check=allfiles big_file" 1 +ExpLines "-o change_check=dir big_file" 1 +ExpLines "-C big_file" 1 + +# Delay so that the ctime is different. +$BINq ci -m 1 -o delay=yes + + +$INFO "2) change file, same mtime, same size, different ctime." +echo blu > big_3 +touch -d "2008-07-03 1:3" big_3 +mv big_3 big_file +ExpLines "-o change_check=allfiles big_file" 1 +ExpLines "-o change_check=file_mtime big_file" 1 +ExpLines "-C -C big_file" 1 +# Another test for -C is in 013_manber, where (because the directory gets +# renamed) even the ctime is the same. + +$BINq ci -m 1 -o change_check=full + + +$INFO "3) new entry" +# . is shown as changed, as the check flag is set. +echo blu > new-file-now +touch -d "2008-07-03 1:3" . +ExpLines "-N -N" 1 +ExpLines "-o change_check=file_mtime" 2 +ExpLines "-o change_check=allfiles" 2 +ExpLines "-o change_check=dir" 2 +ExpLines "-C" 2 +ExpLines "-C -C" 2 + + diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/038_multiurl_use /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/038_multiurl_use --- fsvs-1.1.14/tests/038_multiurl_use 2008-03-10 07:57:01.000000000 +0000 +++ fsvs-1.1.17/tests/038_multiurl_use 2008-10-10 15:56:18.000000000 +0100 @@ -34,6 +34,8 @@ # PS: is there an easier way for variable indirection? 
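# Regarding the "PS" above: since bash 4.3 a nameref is an alternative to the
# ${!var} indirection; just a sketch, the value here is made up:
which=1
WC_MACHINE1=/tmp/wc-machine-1	# stands in for the real per-machine WC path
declare -n _mac="WC_MACHINE$which"	# _mac now follows WC_MACHINE1
echo "$_mac"	# prints /tmp/wc-machine-1
_mac=/somewhere/else	# assignments go through the nameref as well
echo "$WC_MACHINE1"	# prints /somewhere/else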
+logfile=$LOGDIR/038.multi-url + set -e $PREPARE_CLEAN WC_COUNT=$NUM_WC > /dev/null $INCLUDE_FUNCS @@ -49,6 +51,8 @@ echo got ya > bin/ls echo something > root-file echo is_base > is_base +mkdir both +echo master_of_desaster > both/master $BINq urls $REP_MASTER $BINq ci -m "master" @@ -72,14 +76,23 @@ cd "${!_loc}" # We have to load, as the (wrong) /trunk was already set by prepare echo "$_rep" | $BINq urls load +# An empty revision 2 is needed. (1 is the mkdir above) + $BINq ci -m empty > /dev/null echo "Set URL $_rep for ${!_loc}" + echo "$which here" > is_$which + $BINq ci -m here > /dev/null echo "is $which" > is_$which + $BINq ci -m is > /dev/null # Make some empty revisions, so that the revision numbers surely disagree for a in a `seq 1 $(($RANDOM % 14))` do $BINq ci -m $a > /dev/null done + mkdir both + echo $which > both/m-$which + $BINq ci -m b + # Goto machine-WC _mac="WC_MACHINE$which" cd "${!_mac}" @@ -212,5 +225,89 @@ fi +# Try to specify a single URL; go to the empty revision. +$BINq up -u local@2 +$COMPARE $WC_MACHINE1/ $WC_MASTER/ 0 ignore-dir-mtime + +# Now the is_1 file should appear. +$BINdflt up -u local@3 > $logfile +# The "." modified is allowed, and there's the "Updated ..." line. +if [[ `wc -l < $logfile` == [23] ]] && + grep -E "^N\.\.\. .* 7 .* is_1$" $logfile +then + $SUCCESS "Single-URL update ok" +else + cat $logfile + $ERROR "Single-URL update wrong?" +fi +# There's at least an empty revision afterwards. +if [[ `$BINdflt diff -u local@3 -r4` != "" ]] +then + $ERROR "Empty single-URL diff wrong?" +fi +$BINdflt diff -u local -r4 > $logfile +if [[ `wc -l < $logfile` == 6 && + `grep -c is_1 $logfile` == 3 ]] +then + $SUCCESS "Single-URL diff ok" +else + cat $logfile + $ERROR "Single-URL diff wrong" +fi + $SUCCESS "Master/Local-URL usage works." + +# Now use the multi-URL setup to test "fsvs cat". +$BINq up + +cd $WC_MACHINE1 +for file in is_1 is_base bin/date +do + md5l=`md5sum - < $file` + md5r=`$BINdflt cat $file | md5sum -` + if [[ "$md5l" != "$md5r" ]] + then + $ERROR_NB "Checksums on unmodified file $file wrong:" + $ERROR_NB "local $md5l" + $ERROR "versus repository $md5r" + fi + + echo doesnt matter never mind > $file + md5r=`$BINdflt cat $file | md5sum -` + if [[ "$md5l" != "$md5r" ]] + then + $ERROR_NB "Checksums on modified file $file wrong:" + $ERROR_NB "local was $md5l" + $ERROR "versus repository $md5r" + fi +done +$SUCCESS "fsvs cat works." + + +# Test -rX +export FSVS_COMMIT_TO=local +file=is_1 + +echo 1 > $file +md5_1=`md5sum < $file` +$BINq ci -m1 $file > $logfile +rev1=`grep "revision " $logfile | tail -1 | cut -f2 -d" " | cut -f1 -d" "` + +# Change size, to be sure that the change is seen +echo 22 > $file +md5_2=`md5sum < $file` +$BINq ci -m1 $file + +if [[ `$BINdflt cat $file -r$rev1 | md5sum -` != $md5_1 ]] +then + $ERROR "'cat -r$rev1 $file' wrong" +fi + +if [[ `$BINdflt cat $file -rHEAD | md5sum -` != $md5_2 ]] +then + $ERROR "'cat -rHEAD $file' wrong" +else + $SUCCESS "cat -rX works, too." +fi + diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/039_debug /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/039_debug --- fsvs-1.1.14/tests/039_debug 2007-10-18 09:17:53.000000000 +0100 +++ fsvs-1.1.17/tests/039_debug 2008-10-25 12:10:17.000000000 +0100 @@ -17,13 +17,15 @@ logfile2=$logfile-2 logfile3=$logfile-3 -# We have to give some option so that the number or parameters is the same. -$BINdflt -o diff_extra=1 -d -D main | cut -f2- -d' ' > $logfile1 +# We have to give some option so that the number of parameters is the same. 
+# We cut the timestamp and the memory addresses away, as they might be +# different if address space randomization is enabled. +$BINdflt -o diff_extra=1 -d -D main | cut -f2- -d' ' | perl -pe 's#0x\w+#0x*#g' > $logfile1 $BINdflt -o debug_output=$logfile3 -d -D main -cut -f2- -d' ' < $logfile3 > $logfile2 +cut -f2- -d' ' < $logfile3 | perl -pe 's#0x\w+#0x*#g' > $logfile2 -$BINdflt -o debug_output="| cut -f2- -d' ' > $logfile3" -d -D main +$BINdflt -o debug_output="| cut -f2- -d' ' | perl -pe 's#0x\w+#0x*#g' > $logfile3" -d -D main if [[ `md5sum $logfile-* | cut -f1 -d" " | sort -u | wc -l` -eq 1 ]] then diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/040_path_display /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/040_path_display --- fsvs-1.1.14/tests/040_path_display 2008-02-15 05:43:59.000000000 +0000 +++ fsvs-1.1.17/tests/040_path_display 2008-07-18 06:11:35.000000000 +0100 @@ -23,9 +23,8 @@ $BINdflt st $path -opath=$pathparm > $logfile if [[ `wc -l < $logfile` -ne 1 ]] then - $ERROR_NB "too many lines:" cat $logfile - exit 1 + $ERROR "too many lines" fi if grep "^$chg \+[a-z0-9]\+ \+$pat" < $logfile > /dev/null diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/042_checkout /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/042_checkout --- fsvs-1.1.14/tests/042_checkout 2008-01-12 16:12:47.000000000 +0000 +++ fsvs-1.1.17/tests/042_checkout 2008-06-05 07:31:49.000000000 +0100 @@ -20,7 +20,7 @@ rm $dir_norm $dir_sr 2> /dev/null || true mkdir $CODIR -$BIN checkout $REPURL $CODIR +$BINq checkout $REPURL $CODIR $COMPAREWITH $CODIR rm -rf $CODIR diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/044_copyfrom /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/044_copyfrom --- fsvs-1.1.14/tests/044_copyfrom 2008-04-02 06:23:01.000000000 +0100 +++ fsvs-1.1.17/tests/044_copyfrom 2008-07-14 08:26:48.000000000 +0100 @@ -64,8 +64,8 @@ fi # A "true | fsvs cp load" doesn't work, as the copyfrom information is -# added, not replaced. So we revert. -$BINdflt revert . +# added, not replaced. So we undo the copy. +$BINdflt uncopy 3 6 2 if [[ `$BINdflt cp dump -v` == "No copyfrom information was written." ]] then $SUCCESS "Purging copyfrom works" diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/045_copy_details /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/045_copy_details --- fsvs-1.1.14/tests/045_copy_details 2008-01-02 07:40:21.000000000 +0000 +++ fsvs-1.1.17/tests/045_copy_details 2008-07-15 07:46:06.000000000 +0100 @@ -90,3 +90,70 @@ Test ls -i | sort -n + + +# Test uncopy. +mkdir d1 +date > d1/date +$BINq ci -m "T" -o delay=yes +targ=target-$$ + +echo $targ $$ > "$targ" +mkdir d2 +echo $$ > d2/date +echo $$ > d2/new + +$BINdflt cp "$file1" "$targ" +$BINdflt cp d1 d2 + +if [[ `$BINdflt st "$targ"` != ".mC+ "*" $targ" ]] +then + $BINdflt st "$targ" + $ERROR "Unexpected status output after cp." +fi +$BINdflt st d2 > $logfile +if [[ `sort < $logfile` != ".mC+ "*" d2/date"*".mC+ "*" dir d2"*"N... "*" d2/new" ]] +then + cat $logfile + $ERROR "Unexpected status output after cp." +fi + +$BINdflt uncopy "$targ" +if [[ `$BINdflt st "$targ"` == "N... "*" $targ" ]] +then + $SUCCESS "'uncopy file' works" +else + $BINdflt st "$targ" + $ERROR "Unexpected status output after 'uncopy file'." +fi +$BINdflt uncopy d2 +$BINdflt st > $logfile +if [[ `grep "^N\.\.\." < $logfile | wc -l` -eq 4 ]] +then + $SUCCESS "'uncopy dir' works" +else + $BINdflt st + $ERROR "Unexpected status output after 'uncopy dir'." 
+fi + + +# Now test uncopy of added and property-set entries +$BINq cp d1 d2 +echo 12345 > d2/added +echo 54321 > d2/prop +$BINq ps a b d2/prop +$BINq add d2/added + + +$BINdflt uncopy d2 +$BINdflt ignore './**' +$BINdflt st d2 > $logfile +if [[ `sort < $logfile` == ".m.. "*" dir d2"*"n... "*" 6 d2/added"*"nP.. "*" 6 d2/prop" ]] +then + $SUCCESS "'uncopy dir' with added works" +else + $BINdflt st + $ERROR "Unexpected status output after 'uncopy dir' for added entries." +fi + + diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/047_revert_details /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/047_revert_details --- fsvs-1.1.14/tests/047_revert_details 2008-03-25 06:22:26.000000000 +0000 +++ fsvs-1.1.17/tests/047_revert_details 2008-10-25 12:16:24.000000000 +0100 @@ -78,9 +78,9 @@ fi Check "$cur_dest" "...+" "revert on copied" $BINq revert $cur_dest - Check "$cur_dest" "N..." "revert*2 on copied" - $BINq revert $cur_dest - Check "$cur_dest" "N..." "revert*3 on copied" + Check "$cur_dest" "...+" "revert*2 on copied" + $BINq uncopy $cur_dest + Check "$cur_dest" "N..." "revert, uncopy on copied" done if [[ "$failed" == "1" ]] @@ -96,7 +96,7 @@ dir=directory mkdir -m 0777 $dir -$BINq ci -m2 +$BINq ci -m2 -odelay=yes ls -lad $dir $dest > $logfile @@ -107,7 +107,10 @@ else chown bin.bin $dir $dest fi -chmod 000 $dir $dest + +# For files with umask 000 FSVS should show "maybe changed", not fail. +chmod 000 $dest $dir +$BINdflt st $BINq revert $dir $BINq revert $dest @@ -118,4 +121,3 @@ $ERROR "Meta-data not reverted" fi -exit diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/048_warnings /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/048_warnings --- fsvs-1.1.14/tests/048_warnings 2008-02-21 06:15:40.000000000 +0000 +++ fsvs-1.1.17/tests/048_warnings 2008-04-03 06:27:38.000000000 +0100 @@ -33,14 +33,6 @@ fi -if FSVS_WARNINGS="meta-user=ignore unknown-warning=ignore" $BINdflt st > $logfile 2>&1 -then - $ERROR "FSVS_WARNINGS not used?" -else - $SUCCESS "FSVS_WARNINGS seems to be used" -fi - - if [[ 1$opt_DEBUG == 11 ]] then # We need a sub-shell, as we expect an error returned and have to remove @@ -83,15 +75,6 @@ $ERROR "FSVS_WARNING not parsed?" fi - FSVS_WARNINGS=_test-warning=once $BINdflt st > $logfile 2>&1 - el=$? - if [[ $el -eq 0 && `grep test-warning $logfile` ]] - then - $SUCCESS "FSVS_WARNINGS used" - else - $ERROR "FSVS_WARNINGS not parsed?" - fi - # Check whether the config file is respected echo 'warning=_test-warning=stop' > $FSVS_CONF/config if $BINdflt -d st > $logfile 2>&1 diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/049_invalid_props /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/049_invalid_props --- fsvs-1.1.14/tests/049_invalid_props 2008-02-22 13:26:08.000000000 +0000 +++ fsvs-1.1.17/tests/049_invalid_props 2008-05-19 05:49:23.000000000 +0100 @@ -46,6 +46,8 @@ then $SUCCESS "Timestamp is current" else + date + ls -la $filename $ERROR "Timestamp is wrong" fi diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/053_conflicts /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/053_conflicts --- fsvs-1.1.14/tests/053_conflicts 2008-04-02 06:22:41.000000000 +0100 +++ fsvs-1.1.17/tests/053_conflicts 2008-06-14 18:54:57.000000000 +0100 @@ -16,7 +16,7 @@ # Now change line 7. 
perl -pe 's/^7$/7local/' < common_ancestor > locally_changed # What the merged file should look like -merge -A -p locally_changed common_ancestor repository > merged_ok +diff3 -m locally_changed common_ancestor repository > merged_ok # Another local data, with conflict perl -pe 's/^2$/2 is changed=conflict/' < common_ancestor > will_conflict # Output of the merge conflict can be done only when we know the revision @@ -28,16 +28,16 @@ # Put into repository. cat common_ancestor > $target -$BINq ci -m 1 $target > $logfile +$BINq ci -m 1 $target -o delay=yes > $logfile rev_old=`grep "revision " $logfile | tail -1 | cut -f2 -d" " | cut -f1 -d" "` cat repository > $target -$BINq ci -m 2 $target > $logfile +$BINq ci -m 2 $target -o delay=yes > $logfile rev_new=`grep "revision " $logfile | tail -1 | cut -f2 -d" " | cut -f1 -d" "` # Now create the merge conflict compare file. -if merge -A -p -L ./$target.mine -L ./$target.r$rev_old -L ./$target.r$rev_new will_conflict common_ancestor repository > merged_conflict +if diff3 -m -L ./$target.mine -L ./$target.r$rev_old -L ./$target.r$rev_new will_conflict common_ancestor repository > merged_conflict then $ERROR "merge doesn't gives a conflict?" fi @@ -201,7 +201,9 @@ # Now do a conflict cat will_conflict > $target -$BINq up -o conflict=merge > $logfile +touch time_mark_file +sleep 1 +$BINq up -o conflict=merge -o delay=yes > $logfile if ! cmp $target merged_conflict then diff -u $target merged_conflict @@ -230,7 +232,7 @@ $ERROR "No conflict on mis-merge?" fi -if $BINdflt ci $target -m 1 +if $BINdflt ci $target -m conflict then $ERROR "shouldn't commit a conflicted file!" else @@ -251,7 +253,7 @@ # After resolve only $target should be seen - not any other files. # With just "status" we'd get a line for ".", too - which we don't want. $BINdflt st * > $logfile -if [[ `grep '^N' < $logfile | wc -l` -ne 6 ]] +if [[ `grep '^N' < $logfile | wc -l` -ne 7 ]] then cat $logfile $ERROR "resolve takes unknown files, too" @@ -265,6 +267,16 @@ fi +# Look whether the merged file has a NOW timestamp - ie. newer than the +# marked file +if [[ $target -ot time_mark_file ]] +then + ls -la --full-time $target time_mark_file + $ERROR "Timestamp of merged file wrong" +else + $SUCCESS "merged file has mtime NOW" +fi + if $BINdflt ci $target -m 1 then diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/054_sync_revert /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/054_sync_revert --- fsvs-1.1.14/tests/054_sync_revert 1970-01-01 01:00:00.000000000 +0100 +++ fsvs-1.1.17/tests/054_sync_revert 2008-06-09 21:35:03.000000000 +0100 @@ -0,0 +1,17 @@ +#!/bin/bash + +set -e +$PREPARE_DEFAULT > /dev/null +$INCLUDE_FUNCS + +cd $WC + +$BINq up + +rm -rf * +$BINq sync-repos + +$BINq revert -R -R . + +$WC2_UP_ST_COMPARE + diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/055_rel-ignore /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/055_rel-ignore --- fsvs-1.1.14/tests/055_rel-ignore 1970-01-01 01:00:00.000000000 +0100 +++ fsvs-1.1.17/tests/055_rel-ignore 2008-07-17 05:42:38.000000000 +0100 @@ -0,0 +1,72 @@ +#!/bin/bash + +set -e +$PREPARE_CLEAN > /dev/null +$INCLUDE_FUNCS +cd $WC + +# General ignore patterns are tested somewhere else. +# Here just the special rel-ignore function is tested. 
+ +logfile=$LOGDIR/055.log + + +mkdir -p a/b +$BINq ci -m1 -o delay=yes + +touch a/b/f1 a/b/fI +touch g1 gI + +function exp +{ + dumped="$1" + cnt=$2 + cnt_I="$3" + shift 3 + # Rest is patterns to load + + while [[ "$1" != "" ]] + do + pat="$1" + shift + + true | $BINdflt ignore load + $INFO "Testing $pat" + $BINdflt rel-ignore "$pat" + + $BINdflt st > $logfile + if [[ `grep I $logfile | wc -l` -ne $cnt_I ]] + then + cat $logfile + $ERROR "$cnt_I *I can be ignored, but found "`grep -c I < $logfile` + fi + + if [[ `wc -l < $logfile` -ne $cnt ]] + then + cat $logfile + $ERROR "$cnt lines expected, "`wc -l < $logfile`" got" + fi + + $BINdflt ignore dump > $logfile + if [[ `cat $logfile` == "$dumped" ]] + then + $SUCCESS "$dumped matched" + else + cat $logfile + $ERROR "$dumped not matched" + fi + done +} + +exp "./**/*I" 4 0 "**/*I" "**/../**/*I" +exp "./**I" 4 0 "**I" "./**I" "$WC/**I" +# Ignore only on top level +exp "./*I" 5 1 "**/../*I" +# From here on only level 2 below is ignored, so the single ./gI entry +# gets found. +exp "./*/*/*I" 5 1 "*/*/*I" "*/../*/X/../*/*I" "$WC/*/*/227/../*I" +exp "./a/**I" 5 1 "./a/**I" +# All can be found +exp "./a/**p" 6 2 "a/**p" +exp "./*/*I" 6 2 "*/*I" "./*/*I" "$WC/*/*I" + diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/056_all_removed /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/056_all_removed --- fsvs-1.1.14/tests/056_all_removed 1970-01-01 01:00:00.000000000 +0100 +++ fsvs-1.1.17/tests/056_all_removed 2008-10-10 15:57:52.000000000 +0100 @@ -0,0 +1,35 @@ +#!/bin/bash + +set -e +$PREPARE_CLEAN > /dev/null +$INCLUDE_FUNCS +cd $WC + +logfile=$LOGDIR/056.all_removed + +# Testing the all_removed option. +mkdir -p 1/2/3/4/5/6/7 +$BINq ci -m1 +rm -r 1/2 + +for FSVS_DIR_SORT in yes no +do + $INFO "testing all_removed for dir_sort=$FSVS_DIR_SORT." + + $BINdflt st -o all_removed=yes > $logfile + if [[ `wc -l < $logfile` != 7 ]] + then + cat $logfile + $ERROR "all_removed=yes doesn't work." + fi + + $BINdflt st -o all_removed=no > $logfile + if [[ `wc -l < $logfile` != 2 ]] + then + cat $logfile + $ERROR "all_removed=no doesn't work." + fi +done + +$SUCCESS "all_removed seems ok." + diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/057_setenv /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/057_setenv --- fsvs-1.1.14/tests/057_setenv 1970-01-01 01:00:00.000000000 +0100 +++ fsvs-1.1.17/tests/057_setenv 2008-10-29 07:18:47.000000000 +0000 @@ -0,0 +1,43 @@ +#!/bin/bash + +set -e +$PREPARE_CLEAN > /dev/null +$INCLUDE_FUNCS +cd $WC + + +dir=1/a/X +fn=kairo22 +path=$dir/$fn +logenv=$LOGDIR/056.env +logrev=$LOGDIR/056.rev + +mkdir -p $dir +echo Data=$$ > $path + +# We use update, to see whether the revision is set too. +# We have to read from STDIN, else FSVS cries. +$BINdflt ps fsvs:update-pipe "cat > /dev/null ; set | grep ^FSVS_ > $logenv " $path +$BINq ci -mx + +$BINdflt up $WC2 > $logrev +rev=`grep "revision " $logrev | tail -1 | cut -f2 -d" " | cut -f1 -d.` + +function Check +{ + if ! grep --line-regexp "$@" "$logenv" + then + cat $logenv + $ERROR "Didn't see $@" + fi +} + +Check FSVS_CONF=$FSVS_CONF +Check FSVS_WAA=$FSVS_WAA +Check FSVS_CURRENT_ENTRY=$path +Check FSVS_SOFTROOT= +Check FSVS_WC_ROOT=$WC2 +Check FSVS_WC_CONF=$($TEST_PROG_DIR/path2spool $WC2 "^") +Check FSVS_TARGET_REVISION=$rev + +$SUCCESS "Environment correctly set." 
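# The FSVS_* variables checked above are what a hook sees at run time; a
# minimal sketch of an update-pipe using them (the script location, log path
# and the plain pass-through are assumptions, not taken from this test):
cat > /tmp/fsvs-update-logger <<'EOF'
#!/bin/bash
echo "$FSVS_CURRENT_ENTRY of $FSVS_WC_ROOT updated to r$FSVS_TARGET_REVISION" >> /tmp/fsvs-update-pipe.log
exec cat	# hand the streamed file content through unchanged
EOF
chmod +x /tmp/fsvs-update-logger
# and then, analogous to the "ps" call above:
#   $BINdflt ps fsvs:update-pipe /tmp/fsvs-update-logger $path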
diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/058_ignore_modematch /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/058_ignore_modematch --- fsvs-1.1.14/tests/058_ignore_modematch 1970-01-01 01:00:00.000000000 +0100 +++ fsvs-1.1.17/tests/058_ignore_modematch 2008-10-29 07:19:09.000000000 +0000 @@ -0,0 +1,64 @@ +#!/bin/bash + +set -e +$PREPARE_CLEAN > /dev/null +$INCLUDE_FUNCS +cd $WC + +$BINdflt delay + +# General ignore patterns are tested somewhere else. +# Here the mode match is tested. + +logfile=$LOGDIR/058.log +ign_file=`$PATH2SPOOL $WC Ign` + +if $BINdflt ignore m:0700:0070 > $logfile 2>&1 +then + $ERROR "Wrong match pattern (masks) shouldn't work." +fi +if $BINdflt ignore m-7:7 > $logfile 2>&1 +then + $ERROR "Wrong match pattern (syntax) shouldn't work." +fi +if $BINdflt ignore m:8:7 > $logfile 2>&1 +then + $ERROR "Wrong match pattern (octal) shouldn't work." +fi +if $BINdflt ignore m:a > $logfile 2>&1 +then + $ERROR "Wrong match pattern (non-numeric) shouldn't work." +fi + + +function T +{ + exp=$1 + shift + $INFO "testing $@." + + test -e $ign_file && rm $ign_file + $BINdflt ignore "$@" + $BINdflt st | grep -v dir > $logfile || true + if [[ `wc -l < $logfile` -eq $exp ]] + then + $SUCCESS "Match mode $@ ok." + else + cat $logfile + $ERROR "Expected $exp lines output for $@." + fi +} + + +date > file +chmod 0750 file + +T 0 './**' +T 1 't./file' './**' +T 0 'm:0700:0700,./file' +T 1 'm:0700:0500,./**' +T 0 m:0050:0050 +T 1 m:0050:0000 +T 0 m:0007:0000 + + diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/060_components /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/060_components --- fsvs-1.1.14/tests/060_components 2007-08-09 06:33:39.000000000 +0100 +++ fsvs-1.1.17/tests/060_components 2008-04-18 06:12:45.000000000 +0100 @@ -29,7 +29,7 @@ then $INFO "$scr ok" else - echo "$scr:1: unexpected answer" + echo "$base/$scr:1: unexpected answer" $ERROR "Component-test failed" fi done diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/090_special /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/090_special --- fsvs-1.1.14/tests/090_special 2008-02-22 19:00:46.000000000 +0000 +++ fsvs-1.1.17/tests/090_special 2008-10-25 12:09:08.000000000 +0100 @@ -42,10 +42,11 @@ function CT { delay_start=`date +%s` - for a in 1 2 3 4 +# Make sure the files have different lengths + for a in 1 22 333 4444 do echo $a > file - $BINq ci -m1 $1 > /dev/null + $BINq ci -m$a $1 > /dev/null # Alternatively we could test whether the microseconds are nearly 0 when we # get here. done @@ -67,9 +68,24 @@ # At least two seconds difference should be here. if [[ $(expr $with_delay - $normal) -ge 2 ]] then - $SUCCESS "delay seems to work." + $SUCCESS "The delay option seems to work." else - $ERROR "delay too fast" + $ERROR "The delay option is too fast" +fi + + +$INFO "Testing delay command." +dirfile=`$PATH2SPOOL . dir` +touch -d "now + 2 seconds" $dirfile +a=`date +%s` +$BINdflt delay +b=`date +%s` +diff=`echo $b - $a | bc` +if [ $diff -ge 1 ] +then + $SUCCESS "Delay command works." +else + $ERROR "Delay command took only diff=$diff" fi @@ -120,7 +136,7 @@ # another commit. G=40/32/file touch $G -if [[ `$BINdflt st` == ".m.?"*" 0 "*"$G" ]] +if [[ `$BINdflt st` == ".m.."*" 0 "*"$G" ]] then $SUCCESS "Entry list seems to be correct" else diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/compare /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/compare --- fsvs-1.1.14/tests/compare 2007-04-10 06:30:07.000000000 +0100 +++ fsvs-1.1.17/tests/compare 2008-06-23 07:52:08.000000000 +0100 @@ -4,29 +4,47 @@ $d=shift; ($s && $d) || die "Which directories to compare???\n"; -$expect_fail=!!(shift()); +# !! 
crashes syntax highlightning in vim +$expect_fail= ! !(shift()); -# strace -o /tmp/asdga -f -tt -$cmd=qq'LANG=C rsync -n -v -v --delete -a --stats "$s" "$d"'; -$_=`$cmd`; +$ignore_dmtime= shift(); -$SIG{"__DIE__"} = sub { print STDERR '$ ',$cmd,"\n",$_,@_; exit 1; }; +# find /tmp/fsvs-test-1000/wc-1/ -printf "% y%04m %7s %5G %5U %T+ %P\0\t%l\0\n" +# strace -o /tmp/asdga -f -tt +$cmd=qq'LANG=C rsync -ain --delete "$s" "$d" -c'; +@changes=(); +for (`$cmd`) +{ +# Ignore empty lines. + next if m(^\s*$); -($count)=( m#Number of files transferred: (\d+)#i ); -($deleted)=( m#[\r\n]deleting (?!in \.)#i ); +# ignore root directory + next if m(\.d......... \./\s*$); +# ignore mtime of links + next if m(\.L\.\.t\.\.\.\.\.\. .* -> ); +# and directories + next if $ignore_dmtime && m(\.d\.\.t\.\.\.\.\.\. .*); + +# everything else is a change. + push @changes, $_; +#.L..t...... typechange/device-symlink -> device-1 +#>fc........ typechange/dir-file +#.L..t...... typechange/file-symlink -> file-1 +#cL+++++++++ typechange/symlink-symlink -> symlink-1 +} -defined($count) || die "----- Cannot interpret this answer\n"; +$SIG{"__DIE__"} = sub { print STDERR '$ ',$cmd,"\n",@changes,@_; exit 1; }; -if ($expect_fail && ($count || $deleted)) +if ($expect_fail && @changes) { print "----- expected differences\n"; # pass output for further processing - print $_; + print @changes; exit 0; } -die "----- Differences were found\n" if ( $count || $deleted ); +die "----- Differences were found\n" if @changes; print "----- comparison of directory gave no differences\n"; exit 0; diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/comp-test/030_alloc_free.ct /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/comp-test/030_alloc_free.ct --- fsvs-1.1.14/tests/comp-test/030_alloc_free.ct 1970-01-01 01:00:00.000000000 +0100 +++ fsvs-1.1.17/tests/comp-test/030_alloc_free.ct 2008-04-18 06:13:01.000000000 +0100 @@ -0,0 +1,68 @@ +## Test for struct estat alloc/free. +## Stops automatically in _do_component_tests. + +## We allocate 10 (struct estat)s, free some in a defined order, and check +## whether the freelist matches the expectations. + +set free_list=0 + +#= 0 +call ops__allocate( 10, estat_array, int_array+0) + +#= 10 +print int_array[0] + + +## set pointers +set estat_array[1]=estat_array[0]+1 +set estat_array[2]=estat_array[1]+1 +set estat_array[3]=estat_array[2]+1 +set estat_array[4]=estat_array[3]+1 +set estat_array[5]=estat_array[4]+1 +set estat_array[6]=estat_array[5]+1 +set estat_array[7]=estat_array[6]+1 +set estat_array[8]=estat_array[7]+1 +set estat_array[9]=estat_array[8]+1 + + +## Now we have () () () () () () () () () () +## and free as 3 1 2 + + +#= 0 +call ops__free_entry(estat_array+3) + +#= 1 +print free_list->count +#= 0 +print (long)free_list->next - (long)(estat_array[0]+10) +#= 0 +print (long)free_list - (long)(estat_array[0]+3) + +#= 0 +call ops__free_entry(estat_array+4) + + +## Now test merging! 
+ + +#= 2 +print free_list->count +#= 0 +print (long)free_list->next - (long)(estat_array[0]+10) +## Entry 3 was set to NULL by ops__free_entry() +#= 0 +print (long)free_list - (long)(estat_array[0]+3) + +#= 0 +call ops__free_entry(estat_array+2) + +#= 3 +print free_list->count +#= 0 +print (long)free_list - (long)(estat_array[0]+2) + + +kill + + diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/comp-test/080_find_common_base.ct /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/comp-test/080_find_common_base.ct --- fsvs-1.1.14/tests/comp-test/080_find_common_base.ct 2007-08-16 05:49:42.000000000 +0100 +++ fsvs-1.1.17/tests/comp-test/080_find_common_base.ct 2008-10-10 15:58:12.000000000 +0100 @@ -16,7 +16,7 @@ set charp_array_1[1]="$#$ENV{'WC'}#/b" set charp_array_1[2]="$#$ENV{'WC'}#/c" #= 0 -print waa__find_common_base(3, charp_array_1, &charpp) +print waa__find_common_base2(3, charp_array_1, &charpp, 0) #~ 0x\w+ "a" print charpp[0] #~ 0x\w+ "b" @@ -27,7 +27,7 @@ set charp_array_1[0]="$#$ENV{'WC'}#/a/h" set charp_array_1[1]="$#$ENV{'WC'}#/b" #= 0 -print waa__find_common_base(2, charp_array_1, &charpp) +print waa__find_common_base2(2, charp_array_1, &charpp, 0) #~ 0x\w+ "a/h" print charpp[0] #~ 0x\w+ "b" @@ -36,7 +36,7 @@ set charp_array_1[0]="$#$ENV{"WC"}#/a/h" set charp_array_1[1]="$#$ENV{"WC"}#/a/j" #= 0 -print waa__find_common_base(2, charp_array_1, &charpp) +print waa__find_common_base2(2, charp_array_1, &charpp, 0) #~ 0x\w+ "a/h" print charpp[0] #~ 0x\w+ "a/j" @@ -45,7 +45,7 @@ set charp_array_1[0]="/does_never/exist" set charp_array_1[1]="/never/so_we/get_an/error" #= 2 -print waa__find_common_base(2, charp_array_1, &charpp) +print waa__find_common_base2(2, charp_array_1, &charpp, 0) kill diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/Makefile.in /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/Makefile.in --- fsvs-1.1.14/tests/Makefile.in 2008-02-21 13:17:45.000000000 +0000 +++ fsvs-1.1.17/tests/Makefile.in 2008-10-25 12:17:53.000000000 +0100 @@ -56,7 +56,7 @@ # options from configure opt_DEBUG = @ENABLE_DEBUG@ -export opt_DEBUG +export opt_DEBUG BASH_VERBOSE TEST_LIST ifdef TEST_LIST @@ -198,5 +198,5 @@ run_tests: @echo Running tests... @echo '' > $(FSVS_CONF)/config - @set -e ; for test in $(TEST_LIST) ; do echo "" ; tput setaf 4 || true ; echo "_______________________________________________________" ; echo " \"$$test\":1: ("`date`")"`tput op` ; xx=`pwd`/$$test ; ( cd $(TESTBASE) && CURRENT_TEST=$$test bash $(BASH_VERBOSE) $$xx 2>&1 ) || { echo "----++---- $$xx failed ----++----" ; exit 1 ; } ; done + @$(TEST_PROG_DIR)/run-tests diff -Nru /tmp/iUV43XtGcF/fsvs-1.1.14/tests/run-tests /tmp/FWQOjxAJQ0/fsvs-1.1.17/tests/run-tests --- fsvs-1.1.14/tests/run-tests 1970-01-01 01:00:00.000000000 +0100 +++ fsvs-1.1.17/tests/run-tests 2008-10-29 07:18:57.000000000 +0000 @@ -0,0 +1,34 @@ +#!/bin/bash + +set -e + +anyfail=0 +. ./test_functions + +for test in $TEST_LIST +do + echo "" + tput setaf 4 || true + echo "_______________________________________________________" + echo " \"$test\":1: ("`date`")"`tput op` + script=`pwd`/$test + if ! ( cd $TESTBASE && CURRENT_TEST=$test bash $BASH_VERBOSE $script 2>&1 ) + then + echo "----++---- $script failed ----++----" + + if [[ -z "$TEST_FAIL_WRITE_HDL" ]] + then + exit 1 + else + if [[ "$anyfail" == 0 && -n "$TEST_TTY_HDL" ]] + then + $ERROR_NB "First failed test is $test" > $TEST_TTY_HDL + fi + anyfail=1 + echo $test > $TEST_FAIL_WRITE_HDL + fi + fi +done + +exit $anyfail +
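
With tests/Makefile.in now exporting TEST_LIST and BASH_VERBOSE and delegating to the run-tests driver above, a subset of the suite can be started straight from make. A small usage sketch follows; the chosen test names are only examples, and passing -x as BASH_VERBOSE is an assumption that works because run-tests executes "bash $BASH_VERBOSE $script":

# Run just two tests, with bash tracing enabled for each test script.
make run_tests TEST_LIST="056_all_removed 057_setenv" BASH_VERBOSE=-x

If TEST_FAIL_WRITE_HDL is set (presumably to a pipe or descriptor path prepared by the caller, since each failing name is written with a plain redirect), run-tests no longer aborts on the first error: it records every failing test's name there, announces the first failure on TEST_TTY_HDL, and signals the overall result through its exit status.
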