Error "parche mal formado" al aplicar la fuente diff to wget con parche

I would like to apply this patch, so I copied the text starting at "Index: src/options.h" and ending with "+@item", put it into a new file created in the wget source folder, and then ran:

    $ patch -p0 < name_of_patch
    patching file src/options.h
    patch: **** malformed patch at line 6:  char **excludes; /* List of excluded FTP directories. */

(Patch is indented 1 space.)

How am I supposed to apply this?

This is the content of the file I created:

    Index: src/options.h
    ===================================================================
    --- src/options.h (revision 2276)
    +++ src/options.h (working copy)
    @@ -62,6 +62,8 @@
    char **excludes; /* List of excluded FTP directories. */
    char **includes; /* List of FTP directories to follow. */
    + int maxsize; /* Maximum file size (kB) */
    + int minsize; /* Minimum file size (kB) */
    bool ignore_case; /* Whether to ignore case when matching dirs and files */
    Index: src/init.c
    ===================================================================
    --- src/init.c (revision 2276)
    +++ src/init.c (working copy)
    @@ -182,6 +182,8 @@
    { "loadcookies", &opt.cookies_input, cmd_file },
    { "logfile", &opt.lfilename, cmd_file },
    { "login", &opt.ftp_user, cmd_string },/* deprecated*/
    + { "maxsize", &opt.maxsize, cmd_number },
    + { "minsize", &opt.minsize, cmd_number },
    { "mirror", NULL, cmd_spec_mirror },
    { "netrc", &opt.netrc, cmd_boolean },
    { "noclobber", &opt.noclobber, cmd_boolean },
    Index: src/http.c
    ===================================================================
    --- src/http.c (revision 2276)
    +++ src/http.c (working copy)
    @@ -2252,7 +2252,7 @@
    retried, and retried, and retried, and... */
    uerr_t
    http_loop (struct url *u, char **newloc, char **local_file, const char *referer,
    - int *dt, struct url *proxy)
    + int *dt, struct url *proxy, bool can_ommit)
    {
    int count;
    bool got_head = false; /* used for time-stamping and filename detection */
    @@ -2285,6 +2285,27 @@
    if (opt.ftp_glob && has_wildcards_p (u->path))
    logputs (LOG_VERBOSE, _("Warning: wildcards not supported in HTTP.\n"));
    + /* Try fetching the document header and checking the document length */
    + if (can_ommit && !opt.spider && !opt.ignore_length &&
    + (opt.minsize > 0 || opt.maxsize > 0))
    + {
    + /* Setup hstat struct. */
    + xzero (hstat);
    + hstat.referer = referer;
    +
    + *dt = HEAD_ONLY;
    + err = gethttp (u, &hstat, dt, proxy);
    +
    + if (err == RETRFINISHED && hstat.contlen > 0 &&
    + (opt.minsize > 0 && hstat.contlen < opt.minsize * 1024 ||
    + opt.maxsize > 0 && hstat.contlen > opt.maxsize * 1024))
    + {
    + logputs (LOG_VERBOSE, _("File too small or too big -- not retrieving.\n"));
    + ret = FILEBADFILE;
    + goto exit;
    + }
    + }
    +
    /* Setup hstat struct. */
    xzero (hstat);
    hstat.referer = referer;
    @@ -2300,7 +2321,7 @@
    /* Reset the document type. */
    *dt = 0;
    -
    +
    /* THE loop */
    do
    {
    Index: src/http.h
    ===================================================================
    --- src/http.h (revision 2276)
    +++ src/http.h (working copy)
    @@ -32,7 +32,7 @@
    struct url;
    uerr_t http_loop (struct url *, char **, char **, const char *, int *,
    - struct url *);
    + struct url *, bool);
    void save_cookies (void);
    void http_cleanup (void);
    time_t http_atotm (const char *);
    Index: src/res.c
    ===================================================================
    --- src/res.c (revision 2276)
    +++ src/res.c (working copy)
    @@ -545,7 +545,7 @@
    *file = NULL;
    opt.timestamping = false;
    opt.spider = false;
    - err = retrieve_url (robots_url, file, NULL, NULL, NULL, false);
    + err = retrieve_url (robots_url, file, NULL, NULL, NULL, false, false);
    opt.timestamping = saved_ts_val;
    opt.spider = saved_sp_val;
    xfree (robots_url);
    Index: src/retr.c
    ===================================================================
    --- src/retr.c (revision 2276)
    +++ src/retr.c (working copy)
    @@ -601,7 +601,7 @@
    uerr_t
    retrieve_url (const char *origurl, char **file, char **newloc,
    - const char *refurl, int *dt, bool recursive)
    + const char *refurl, int *dt, bool recursive, bool can_ommit)
    {
    uerr_t result;
    char *url;
    @@ -676,7 +676,7 @@
    #endif
    || (proxy_url && proxy_url->scheme == SCHEME_HTTP))
    {
    - result = http_loop (u, &mynewloc, &local_file, refurl, dt, proxy_url);
    + result = http_loop (u, &mynewloc, &local_file, refurl, dt, proxy_url, can_ommit);
    }
    else if (u->scheme == SCHEME_FTP)
    {
    @@ -856,7 +856,7 @@
    opt.follow_ftp = old_follow_ftp;
    }
    else
    - status = retrieve_url (cur_url->url->url, &filename, &new_file, NULL, &dt, opt.recursive);
    + status = retrieve_url (cur_url->url->url, &filename, &new_file, NULL, &dt, opt.recursive, false);
    if (filename && opt.delete_after && file_exists_p (filename))
    {
    Index: src/retr.h
    ===================================================================
    --- src/retr.h (revision 2276)
    +++ src/retr.h (working copy)
    @@ -49,7 +49,7 @@
    char *fd_read_hunk (int, hunk_terminator_t, long, long);
    char *fd_read_line (int);
    -uerr_t retrieve_url (const char *, char **, char **, const char *, int *, bool);
    +uerr_t retrieve_url (const char *, char **, char **, const char *, int *, bool, bool);
    uerr_t retrieve_from_file (const char *, bool, int *);
    const char *retr_rate (wgint, double);
    Index: src/recur.c
    ===================================================================
    --- src/recur.c (revision 2276)
    +++ src/recur.c (working copy)
    @@ -247,7 +247,7 @@
    int dt = 0;
    char *redirected = NULL;
    - status = retrieve_url (url, &file, &redirected, referer, &dt, false);
    + status = retrieve_url (url, &file, &redirected, referer, &dt, false, !html_allowed);
    if (html_allowed && file && status == RETROK
    && (dt & RETROKF) && (dt & TEXTHTML))
    Index: src/main.c
    ===================================================================
    --- src/main.c (revision 2276)
    +++ src/main.c (working copy)
    @@ -189,6 +189,8 @@
    { "level", 'l', OPT_VALUE, "reclevel", -1 },
    { "limit-rate", 0, OPT_VALUE, "limitrate", -1 },
    { "load-cookies", 0, OPT_VALUE, "loadcookies", -1 },
    + { "max-size", 'M', OPT_VALUE, "maxsize", -1 },
    + { "min-size", 's', OPT_VALUE, "minsize", -1 },
    { "mirror", 'm', OPT_BOOLEAN, "mirror", -1 },
    { "no", 'n', OPT__NO, NULL, required_argument },
    { "no-clobber", 0, OPT_BOOLEAN, "noclobber", -1 },
    @@ -446,6 +448,10 @@
    N_("\
    --limit-rate=RATE limit download rate to RATE.\n"),
    N_("\
    + -M, --max-size=SIZE limit maximum file size to SIZE (kB).\n"),
    + N_("\
    + -s, --min-size=SIZE limit minimum file size to SIZE (kB).\n"),
    + N_("\
    --no-dns-cache disable caching DNS lookups.\n"),
    N_("\
    --restrict-file-names=OS restrict chars in file names to ones OS allows.\n"),
    @@ -675,7 +681,6 @@
    stdout);
    exit (0);
    }
    -
    #ifndef TESTING
    int
    main (int argc, char *const *argv)
    @@ -979,7 +984,7 @@
    opt.follow_ftp = old_follow_ftp;
    }
    else
    - status = retrieve_url (*t, &filename, &redirected_URL, NULL, &dt, opt.recursive);
    + status = retrieve_url (*t, &filename, &redirected_URL, NULL, &dt, opt.recursive, false);
    if (opt.delete_after && file_exists_p(filename))
    {
    Index: doc/wget.texi
    ===================================================================
    --- doc/wget.texi (revision 2276)
    +++ doc/wget.texi (working copy)
    @@ -1592,7 +1592,7 @@
    @item -l @var{depth}
    @itemx --level=@var{depth}
    Specify recursion maximum depth level @var{depth} (@pxref{Recursive
    -Download}). The default maximum depth is 5.
    +Download}). The default maximum depth is 5. Zero means infinite recursion.
    @cindex proxy filling
    @cindex delete after retrieval
    @@ -1803,6 +1803,15 @@
    Specify the domains that are @emph{not} to be followed.
    (@pxref{Spanning Hosts}).
    +@cindex file size range
    +@item -s @var{size}
    +@itemx --min-size=@var{size}
    +Limit the minimum size of non-HTML files to @var{size} kB. Smaller files will not be retrieved.
    +
    +@item -M @var{size}
    +@itemx --max-size=@var{size}
    +Limit the maximum size of non-HTML files to @var{size} kB. Larger files will not be retrieved.
    +
    @cindex follow FTP links
    @item --follow-ftp
    Follow @sc{ftp} links from @sc{html} documents. Without this option,
    @@ -3064,6 +3073,14 @@
    too.
    @item
    +Retrieve in directory 'pics' all jpeg images from a given site, excluding
    +files smaller than 50k (to avoid thumbnails) or larger than 400k.
    +
    +@example
    +wget -Ppics -nd -r -l0 -Ajpg,jpeg -s50 -M400 http://www.server.com
    +@end example
    +
    +@item
    Suppose you were in the middle of downloading, when Wget was
    interrupted. Now you do not want to clobber the files already
    present. It would be:
    Index: src/utils.c
    ===================================================================
    --- src/utils.c (revision 2276)
    +++ src/utils.c (working copy)
    @@ -432,33 +432,52 @@
    #endif
    }
    -/* stat file names named PREFIX.1, PREFIX.2, etc., until one that
    - doesn't exist is found. Return a freshly allocated copy of the
    - unused file name. */
    +/*
    + * Stat file names named PREFIX-1.SUFFIX, PREFIX-2.SUFFIX, etc., until
    + * one that doesn't exist is found. Return a freshly allocated copy of
    + * the unused file name.
    + */
    static char *
    -unique_name_1 (const char *prefix)
    +unique_name_1 (const char *s)
    {
    int count = 1;
    - int plen = strlen (prefix);
    - char *template = (char *)alloca (plen + 1 + 24);
    - char *template_tail = template + plen;
    + int p, l = strlen (s);
    + char *prefix = (char *) alloca (l + 1);
    + char *suffix = (char *) alloca (l + 1);
    + char *filename = (char *) alloca (l + 26);
    +
    + /* Look for last '.' in filename */
    +
    + for(p = l; p >= 0 && s[p] != '.'; p--);
    - memcpy (template, prefix, plen);
    - *template_tail++ = '.';
    + /* If none found, then prefix is the whole filename */
    +
    + if (p < 0)
    + p = l;
    + /* Extract prefix and (possibly empty) suffix from filename */
    +
    + memcpy (prefix, s, p);
    + prefix[p] = '\0';
    +
    + memcpy (suffix, s+p, l-p);
    + suffix[l-p] = '\0';
    +
    + /* Try indexed filenames until an unused one is found */
    +
    do
    - number_to_string (template_tail, count++);
    - while (file_exists_p (template));
    + sprintf (filename, "%s-%d%s", prefix, count++, suffix);
    + while (file_exists_p (filename));
    - return xstrdup (template);
    + return xstrdup (filename);
    }
    /* Return a unique file name, based on FILE.
    - More precisely, if FILE doesn't exist, it is returned unmodified.
    - If not, FILE.1 is tried, then FILE.2, etc. The first FILE.<number>
    - file name that doesn't exist is returned.
    + More precisely, if FILE.SUF doesn't exist, it is returned unmodified.
    + If not, FILE-1.SUF is tried, then FILE-2.SUF etc. The first
    + FILE-<number>.SUF file name that doesn't exist is returned.
    The resulting file is not created, only verified that it didn't
    exist at the point in time when the function was called.
    Index: doc/wget.texi
    ===================================================================
    --- doc/wget.texi (revision 2276)
    +++ doc/wget.texi (working copy)
    @@ -561,16 +561,16 @@
    cases, the local file will be @dfn{clobbered}, or overwritten, upon
    repeated download. In other cases it will be preserved.
    -When running Wget without @samp{-N}, @samp{-nc}, or @samp{-r},
    -downloading the same file in the same directory will result in the
    -original copy of @var{file} being preserved and the second copy being
    -named @samp{@var{file}.1}. If that file is downloaded yet again, the
    -third copy will be named @samp{@var{file}.2}, and so on. When
    -@samp{-nc} is specified, this behavior is suppressed, and Wget will
    -refuse to download newer copies of @samp{@var{file}}. Therefore,
    -``@code{no-clobber}'' is actually a misnomer in this mode---it's not
    -clobbering that's prevented (as the numeric suffixes were already
    -preventing clobbering), but rather the multiple version saving that's
    +When running Wget without @samp{-N}, @samp{-nc}, or @samp{-r}, downloading the
    +same file in the same directory will result in the original copy of @var{file}
    +being preserved and the second copy being named
    +@samp{@var{prefix}-1.@var{suffix}}, assuming @var{file} = @var{prefix.suffix}.
    +If that file is downloaded yet again, the third copy will be named
    +@samp{@var{prefix}-2.@var{suffix}}, and so on. When @samp{-nc} is specified,
    +this behavior is suppressed, and Wget will refuse to download newer copies of
    +@samp{@var{file}}. Therefore, ``@code{no-clobber}'' is actually a misnomer in
    +this mode---it's not clobbering that's prevented (as the numeric suffixes were
    +already preventing clobbering), but rather the multiple version saving that's
    prevented.
    When running Wget with @samp{-r}, but without @samp{-N} or @samp{-nc},
    @@ -1592,7 +1592,7 @@
    @item -l @var{depth}
    @itemx --level=@var{depth}
    Specify recursion maximum depth level @var{depth} (@pxref{Recursive
    -Download}). The default maximum depth is 5.
    +Download}). The default maximum depth is 5. Zero means infinite recursion.
    @cindex proxy filling
    @cindex delete after retrieval
    @@ -1803,6 +1803,15 @@
    Specify the domains that are @emph{not} to be followed.
    (@pxref{Spanning Hosts}).
    +@cindex file size range
    +@item -s @var{size}
    +@itemx --min-size=@var{size}
    +Limit the minimum size of non-HTML files to @var{size} kB. Smaller files will not be retrieved.
    +
    +@item -M @var{size}
    +@itemx --max-size=@var{size}
    +Limit the maximum size of non-HTML files to @var{size} kB. Larger files will not be retrieved.
    +
    @cindex follow FTP links
    @item --follow-ftp
    Follow @sc{ftp} links from @sc{html} documents. Without this option,
    @@ -3064,6 +3073,14 @@
    too.
    @item
    +Retrieve in directory 'pics' all jpeg images from a given site, excluding
    +files smaller than 50k (to avoid thumbnails) or larger than 400k.
    +
    +@example
    +wget -Ppics -nd -r -l0 -Ajpg,jpeg -s50 -M400 http://www.server.com
    +@end example
    +
    +@item

This is a common problem with diffs copied and pasted into a text file: the one-space indentation of the context lines gets lost. You need to add a space in front of every line, except the lines that begin with the "+", "-", and "@@" symbols. To avoid this problem, it is better to generate the diff files yourself (using the diff tool or a version-control tool), or to download the diff file as a whole, rather than copying and pasting from your browser.
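
If re-adding the spaces by hand is too tedious, the missing context-line prefix can be restored mechanically. This is a minimal sketch, assuming GNU sed, a unified diff, and that only the leading spaces were lost in the paste:

    # Prepend a space to every line that does not start with '+', '-' or '@',
    # i.e. restore the one-space prefix of unified-diff context lines.
    # ('Index:' and '===' separator lines get a space too, but patch treats
    # everything before the '---' header as garbage anyway.)
    sed -i '/^[-+@]/!s/^/ /' name_of_patch

    # Check the repaired patch without modifying any files.
    patch -p0 --dry-run < name_of_patch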

I would suggest that, before applying the patch, you check whether it has already made it upstream and is available in a newer/latest/stable release, which would solve your problem in a clean way. Even if you patch the .c file you will have to compile it anyway, so why not do that with a fresh stable tarball, if the patch has already landed since the older version?
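
If you do end up applying the patch yourself, it is worth testing it against a pristine tree first. A sketch of that workflow; the release number below is only an illustration, so substitute whichever version the patch was actually written against:

    # Fetch and unpack a known-good source tarball (version is illustrative).
    wget https://ftp.gnu.org/gnu/wget/wget-1.10.2.tar.gz
    tar xzf wget-1.10.2.tar.gz
    cd wget-1.10.2

    # --dry-run reports whether the hunks apply, without changing any files.
    patch -p0 --dry-run < ../name_of_patch

    # If the dry run is clean, apply for real.
    patch -p0 < ../name_of_patch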

I second Nikhil's general comments. However, I will just point out that you are generally better off downloading the HTML file and then converting it to text, rather than using cut and paste. For example, you can do:

1) wget -c http://osdir.com/ml/web.wget.patches/2007-07/msg00011.html

2) Open msg00011.html with OpenOffice, for example, and save it as text. The OpenOffice converter does a good job, and I did not see any obvious problems with the converted patches.
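
If you would rather stay on the command line, a text-mode browser can do the same HTML-to-text conversion. A sketch, assuming lynx is installed; note that lynx -dump adds a uniform left margin that has to be stripped again (the three-space width below is an assumption, so check your output):

    # Render the mailing-list page to plain text.
    lynx -dump http://osdir.com/ml/web.wget.patches/2007-07/msg00011.html > msg00011.txt

    # Remove the uniform left margin that lynx adds (width may differ).
    sed 's/^   //' msg00011.txt > name_of_patch

    # patch skips any remaining leading garbage, so a dry run shows whether
    # the converted patch applies cleanly.
    patch -p0 --dry-run < name_of_patch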