Compare commits

...

17 Commits

Author SHA1 Message Date
Russ Cox
ca2cb382ba [release-branch.r57] doc: release.r57
««« CL 4457049 / d222384d1bad
doc: release.r57

R=golang-dev, r, adg, r2
CC=golang-dev
https://golang.org/cl/4457049
»»»

R=golang-dev
TBR=r
CC=golang-dev
https://golang.org/cl/4431087
2011-05-03 14:07:31 -04:00
Russ Cox
658a82a13b [release-branch.r57] Make.cmd: create TARGDIR if necessary
««« CL 4437089 / b2670a39da7c
Make.cmd: create TARGDIR if necessary

Fixes #1771.

R=adg, rsc1
CC=golang-dev
https://golang.org/cl/4437089
»»»

R=adg
CC=golang-dev
https://golang.org/cl/4454054
2011-05-03 12:53:51 -04:00
Russ Cox
9f89972666 [release-branch.r57] reflect: allow unexported key in Value.MapIndex
««« CL 4444087 / 9abf81a9df90
reflect: allow unexported key in Value.MapIndex

Fixes #1748.

R=golang-dev, r
CC=golang-dev
https://golang.org/cl/4444087
»»»

R=r, r2
CC=golang-dev
https://golang.org/cl/4433099
2011-05-03 11:08:57 -04:00
Russ Cox
125e91a174 [release-branch.r57] jpeg: speed up RGBA encoding ~%50
««« CL 4433088 / 099dd59d3976
jpeg: speed up RGBA encoding ~%50

Avoids image.At(), color.RGBA(), opposing 8 bit shifts,
and min function calls in a loop.  Not as pretty as before,
but the pure version is still there to revert back to
later if/when the compiler gets better.

before (best of 5)
jpeg.BenchmarkEncodeRGBOpaque   50   64781360 ns/op   18.97 MB/s

after (best of 5)
jpeg.BenchmarkEncodeRGBOpaque   50   42044300 ns/op   29.23 MB/s

(benchmarked on an HP z600; 16 core Xeon E5520 @ 2.27Ghz)

R=r, r2, nigeltao
CC=golang-dev
https://golang.org/cl/4433088
»»»

TBR=adg
CC=golang-dev
https://golang.org/cl/4431086
2011-05-03 01:50:43 -04:00
Russ Cox
3854d346f6 [release-branch.r57] doc/install: specify clone -u instead of -r
««« CL 4435081 / 48c0b02c6e7f
doc/install: specify clone -u instead of -r

R=rsc
CC=golang-dev
https://golang.org/cl/4435081
»»»

TBR=adg
CC=golang-dev
https://golang.org/cl/4436080
2011-05-03 01:46:11 -04:00
Russ Cox
348ffe7786 [release-branch.r57] image: fix build
««« CL 4438090 / 29f6e2e230a3
image: fix build
accidentally deleted one method
TBR=rsc

R=rsc
CC=golang-dev
https://golang.org/cl/4438090
»»»

TBR=adg
CC=golang-dev
https://golang.org/cl/4445084
2011-05-03 01:45:33 -04:00
Russ Cox
69e88b5c52 [release-branch.r57] image: add type-specific Set methods and use them when decoding PNG.
««« CL 4428080 / 2098f7682f2d
image: add type-specific Set methods and use them when decoding PNG.
This speeds up PNG decode about 20% by avoiding per-pixel interface conversions.

R=nigeltao, rsc
CC=golang-dev
https://golang.org/cl/4428080
»»»

TBR=adg
CC=golang-dev
https://golang.org/cl/4426078
2011-05-03 01:45:01 -04:00
Russ Cox
006e5e279b [release-branch.r57] http/pprof: fix POST reading bug
««« CL 4430075 / ca45c67d28cf
http/pprof: fix POST reading bug

R=bradfitz
CC=golang-dev
https://golang.org/cl/4430075
»»»

TBR=adg
CC=golang-dev
https://golang.org/cl/4454053
2011-05-03 01:43:30 -04:00
Russ Cox
5af3c75e30 [release-branch.r57] 5a, 6a, 8a, cc: remove old environment variables
««« CL 4445079 / f8cc81f985e5
5a, 6a, 8a, cc: remove old environment variables

Uses of $INCLUDE and $NPROC are left over from Plan 9.
Remove them to avoid causing confusion.

R=golang-dev, r2
CC=golang-dev
https://golang.org/cl/4445079
»»»

TBR=adg
CC=golang-dev
https://golang.org/cl/4438096
2011-05-03 01:42:19 -04:00
Russ Cox
a7ae73d4e0 [release-branch.r57] runtime, sync/atomic: fix arm cas
««« CL 4436072 / e280d98747be
runtime, sync/atomic: fix arm cas

Works around bug in kernel implementation on old ARM5 kernels.
Bug was fixed on 26 Nov 2007 (between 2.6.23 and 2.6.24) but
old kernels persist.

Fixes #1750.

R=dfc, golang-dev
CC=golang-dev
https://golang.org/cl/4436072
»»»

TBR=adg
CC=golang-dev
https://golang.org/cl/4452064
2011-05-03 01:41:28 -04:00
Russ Cox
6a9e2c7279 [release-branch.r57] png: speed up opaque RGBA encoding
««« CL 4432077 / 66eb68cbd5c2
png: speed up opaque RGBA encoding

With Linux/8g on a 2006 Mac Mini (1.66 GHz Intel Core Duo,
2KB L1, 2MB L2, 2G main memory), GOMAXPROCS unset:

start:
png.BenchmarkEncodePaletted	      50	  44772820 ns/op
png.BenchmarkEncodeRGBOpaque	      10	 208395900 ns/op
png.BenchmarkEncodeRGBA		       5	 331088000 ns/op

remove interface method calls:
png.BenchmarkEncodePaletted	      50	  44722880 ns/op
png.BenchmarkEncodeRGBOpaque	      10	 139042600 ns/op
png.BenchmarkEncodeRGBA		       5	 334033600 ns/op

flate inline min/max():
png.BenchmarkEncodePaletted	      50	  40631180 ns/op
png.BenchmarkEncodeRGBOpaque	      10	 124894900 ns/op
png.BenchmarkEncodeRGBA		       5	 312099000 ns/op

after adler change:
png.BenchmarkEncodePaletted	      50	  40181760 ns/op
png.BenchmarkEncodeRGBOpaque	      20	 121781950 ns/op
png.BenchmarkEncodeRGBA		       5	 313890800 ns/op

In comparison to 121 ms on this 2006 machine, on my
Core2 Duo 2.66 GHz laptop, the final BenchmarkEncodeRGBOpaque
runs in 27 ms. (these are all for 640x480 images)

R=nigeltao, rsc, r
CC=golang-dev
https://golang.org/cl/4432077
»»»

TBR=adg
CC=golang-dev
https://golang.org/cl/4430076
2011-05-03 01:40:19 -04:00
Russ Cox
50e57603ab [release-branch.r57] http: rename ErrBodyReadAferClose to ErrBodyReadAfterClose
««« CL 4432085 / 370c77323b75
http: rename ErrBodyReadAferClose to ErrBodyReadAfterClose

R=bradfitz, dsymonds
CC=golang-dev
https://golang.org/cl/4432085
»»»

TBR=adg
CC=golang-dev
https://golang.org/cl/4425086
2011-05-03 01:39:17 -04:00
Russ Cox
d00421a6b1 [release-branch.r57] mime/multipart: fix regression from previous ReadSlice change
««« CL 4432083 / 698b5ea9c782
mime/multipart: fix regression from previous ReadSlice change

The previous change to make multipart use ReadSlice out of
paranoia broke multipart to not deal with large lines in
the bodies.

We should only be paranoid about long lines in the header
sections.

Fixes http://code.google.com/p/camlistore/issues/detail?id=4

R=adg
CC=golang-dev
https://golang.org/cl/4432083
»»»

R=adg
CC=golang-dev
https://golang.org/cl/4452062
2011-05-03 01:15:56 -04:00
Russ Cox
b69a320781 [release-branch.r57] http: new error for reading a body after it's been closed
««« CL 4433094 / e30213b07276
http: new error for reading a body after it's been closed

R=adg
CC=golang-dev
https://golang.org/cl/4433094
»»»

R=adg
CC=golang-dev
https://golang.org/cl/4433098
2011-05-03 01:15:41 -04:00
Russ Cox
ed54b19716 [release-branch.r57] image: png & jpeg encoding benchmarks
««« CL 4445074 / 304d7d2b1d6c
image: png & jpeg encoding benchmarks

No code changes in this CL.

R=r
CC=golang-dev
https://golang.org/cl/4445074
»»»

R=adg
CC=golang-dev
https://golang.org/cl/4453056
2011-05-03 01:15:26 -04:00
Russ Cox
29fdaadbef [release-branch.r57] xml: fix reflect error
««« CL 4431075 / acee6ec98e9a
xml: fix reflect error

Fixes #1749.

R=bradfitz
CC=golang-dev
https://golang.org/cl/4431075
»»»

R=adg
CC=golang-dev
https://golang.org/cl/4457048
2011-05-03 01:15:12 -04:00
Russ Cox
5ce9a8d4fc create release-branch.r57 2011-05-03 01:07:23 -04:00
30 changed files with 3795 additions and 3356 deletions

View File

@@ -139,9 +139,8 @@ h1#title {
padding: 0;
}
#content h2 {
border-top: 1px solid #ddd;
background: #E2E7F0;
padding: 5px;
border-top: 2px solid #ddd;
padding: 8px 5px;
margin: 1.5em 0 0;
}
#content .subtitle {

View File

@@ -2,7 +2,8 @@
<ul>
<li><a href="roadmap.html">Roadmap</a></li>
<li><a href="release.html">Release History</a></li>
<li><a href="release.html">Release history</a></li>
<li><a href="weekly.html">Weekly snapshot history</a></li>
<li><a href="http://godashboard.appspot.com">Build and benchmark status</a></li>
</ul>
<ul>

File diff suppressed because it is too large Load Diff

View File

@@ -45,11 +45,7 @@ Debugger.
<li>
App Engine support.
<li>
Improved CGO including some mechanism for calling back from C to Go.
<li>
Improved implementation documentation.
<li>
Faster, allocation-light reflection.
</ul>
<h4 id="Gc_roadmap">
@@ -91,8 +87,6 @@ Packages roadmap</h4>
<ul>
<li>
Faster, allocation-light reflection.
<li>
Faster, RE2-like regular expressions.
<li>
Comprehensive support for international text.
@@ -134,4 +128,8 @@ Package manager (goinstall).
A means of recovering from a panic (recover).
<li>
5g: Better floating point support.
<li>
Improved CGO including some mechanism for calling back from C to Go.
<li>
Faster, allocation-light reflection.
</ul>

2944
doc/devel/weekly.html Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -136,7 +136,7 @@ and make sure the <code>go</code> directory does not exist.
Then check out the repository:</p>
<pre>
$ hg clone -r release https://go.googlecode.com/hg/ go
$ hg clone -u release https://go.googlecode.com/hg/ go
</pre>
<h2 id="install">Install Go</h2>

View File

@@ -112,6 +112,7 @@ defaultcc = None
contributors = {}
missing_codereview = None
real_rollback = None
releaseBranch = None
#######################################################################
# RE: UNICODE STRING HANDLING
@@ -1049,7 +1050,7 @@ def change(ui, repo, *pats, **opts):
if missing_codereview:
return missing_codereview
dirty = {}
if len(pats) > 0 and GoodCLName(pats[0]):
name = pats[0]
@@ -1062,6 +1063,8 @@ def change(ui, repo, *pats, **opts):
if not cl.local and (opts["stdin"] or not opts["stdout"]):
return "cannot change non-local CL " + name
else:
if repo[None].branch() != "default":
return "cannot run hg change outside default branch"
name = "new"
cl = CL("new")
dirty[cl] = True
@@ -1154,7 +1157,9 @@ def clpatch(ui, repo, clname, **opts):
Submitting an imported patch will keep the original author's
name as the Author: line but add your own name to a Committer: line.
"""
return clpatch_or_undo(ui, repo, clname, opts)
if repo[None].branch() != "default":
return "cannot run hg clpatch outside default branch"
return clpatch_or_undo(ui, repo, clname, opts, mode="clpatch")
def undo(ui, repo, clname, **opts):
"""undo the effect of a CL
@@ -1163,7 +1168,66 @@ def undo(ui, repo, clname, **opts):
After creating the CL, opens the CL text for editing so that
you can add the reason for the undo to the description.
"""
return clpatch_or_undo(ui, repo, clname, opts, undo=True)
if repo[None].branch() != "default":
return "cannot run hg undo outside default branch"
return clpatch_or_undo(ui, repo, clname, opts, mode="undo")
def release_apply(ui, repo, clname, **opts):
"""apply a CL to the release branch
Creates a new CL copying a previously committed change
from the main branch to the release branch.
The current client must either be clean or already be in
the release branch.
The release branch must be created by starting with a
clean client, disabling the code review plugin, and running:
hg update weekly.YYYY-MM-DD
hg branch release-branch.rNN
hg commit -m 'create release-branch.rNN'
hg push --new-branch
Then re-enable the code review plugin.
People can test the release branch by running
hg update release-branch.rNN
in a clean client. To return to the normal tree,
hg update default
Move changes since the weekly into the release branch
using hg release-apply followed by the usual code review
process and hg submit.
When it comes time to tag the release, record the
final long-form tag of the release-branch.rNN
in the *default* branch's .hgtags file. That is, run
hg update default
and then edit .hgtags as you would for a weekly.
"""
c = repo[None]
if not releaseBranch:
return "no active release branches"
if c.branch() != releaseBranch:
if c.modified() or c.added() or c.removed():
raise util.Abort("uncommitted local changes - cannot switch branches")
err = hg.clean(repo, releaseBranch)
if err:
return err
try:
err = clpatch_or_undo(ui, repo, clname, opts, mode="backport")
if err:
raise util.Abort(err)
except Exception, e:
hg.clean(repo, "default")
raise e
return None
def rev2clname(rev):
# Extract CL name from revision description.
@@ -1185,15 +1249,24 @@ undoFooter = """
»»»
"""
backportHeader = """[%s] %s
««« CL %s / %s
"""
backportFooter = """
»»»
"""
# Implementation of clpatch/undo.
def clpatch_or_undo(ui, repo, clname, opts, undo=False):
def clpatch_or_undo(ui, repo, clname, opts, mode):
if missing_codereview:
return missing_codereview
if undo:
if mode == "undo" or mode == "backport":
if hgversion < '1.4':
# Don't have cmdutil.match (see implementation of sync command).
return "hg is too old to run hg undo - update to 1.4 or newer"
return "hg is too old to run hg %s - update to 1.4 or newer" % mode
# Find revision in Mercurial repository.
# Assume CL number is 7+ decimal digits.
@@ -1227,8 +1300,19 @@ def clpatch_or_undo(ui, repo, clname, opts, undo=False):
# Create fresh CL and start with patch that would reverse the change.
vers = short(rev.node())
cl = CL("new")
cl.desc = (undoHeader % (clname, vers)) + rev.description() + undoFooter
patch = RunShell(["hg", "diff", "--git", "-r", vers + ":" + short(rev.parents()[0].node())])
desc = rev.description()
if mode == "undo":
cl.desc = (undoHeader % (clname, vers)) + desc + undoFooter
else:
cl.desc = (backportHeader % (releaseBranch, line1(desc), clname, vers)) + desc + undoFooter
v1 = vers
v0 = short(rev.parents()[0].node())
if mode == "undo":
arg = v1 + ":" + v0
else:
vers = v0
arg = v0 + ":" + v1
patch = RunShell(["hg", "diff", "--git", "-r", arg])
else: # clpatch
cl, vers, patch, err = DownloadCL(ui, repo, clname)
@@ -1249,10 +1333,10 @@ def clpatch_or_undo(ui, repo, clname, opts, undo=False):
if id != vers:
patch, err = portPatch(repo, patch, vers, id)
if err != "":
return "codereview issue %s is out of date: %s" % (clname, err)
return "codereview issue %s is out of date: %s (%s->%s)" % (clname, err, vers, id)
argv = ["hgpatch"]
if opts["no_incoming"]:
if opts["no_incoming"] or mode == "backport":
argv += ["--checksync=false"]
try:
cmd = subprocess.Popen(argv, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None, close_fds=sys.platform != "win32")
@@ -1271,7 +1355,7 @@ def clpatch_or_undo(ui, repo, clname, opts, undo=False):
if extra:
ui.warn("warning: these files were listed in the patch but not changed:\n\t" + "\n\t".join(extra) + "\n")
cl.Flush(ui, repo)
if undo:
if mode == "undo":
err = EditCL(ui, repo, cl)
if err != "":
return "CL created, but error editing: " + err
@@ -1506,7 +1590,7 @@ def reposetup(ui, repo):
def CheckContributor(ui, repo, user=None):
set_status("checking CONTRIBUTORS file")
_, userline = FindContributor(ui, repo, user, warn=False)
user, userline = FindContributor(ui, repo, user, warn=False)
if not userline:
raise util.Abort("cannot find %s in CONTRIBUTORS" % (user,))
return userline
@@ -1524,7 +1608,7 @@ def FindContributor(ui, repo, user=None, warn=True):
if user not in contributors:
if warn:
ui.warn("warning: cannot find %s in CONTRIBUTORS\n" % (user,))
return None, None
return user, None
user, email = contributors[user]
return email, "%s <%s>" % (user, email)
@@ -1650,6 +1734,14 @@ def submit(ui, repo, *pats, **opts):
if not cl.copied_from:
EditDesc(cl.name, closed=True, private=cl.private)
cl.Delete(ui, repo)
c = repo[None]
if c.branch() == releaseBranch and not c.modified() and not c.added() and not c.removed():
ui.write("switching from %s to default branch.\n" % releaseBranch)
err = hg.clean(repo, "default")
if err:
return err
return None
def sync(ui, repo, **opts):
"""synchronize with remote repository
@@ -1822,6 +1914,15 @@ cmdtable = {
] + commands.walkopts,
"[-r reviewer] [--cc cc] [change# | file ...]"
),
"^release-apply": (
release_apply,
[
('', 'ignore_hgpatch_failure', None, 'create CL metadata even if hgpatch fails'),
('', 'no_incoming', None, 'disable check for incoming changes'),
],
"change#"
),
# TODO: release-start, release-tag, weekly-tag
"^submit": (
submit,
review_opts + [
@@ -2263,6 +2364,19 @@ def RietveldSetup(ui, repo):
upload_options.email = "test@example.com"
rpc = None
global releaseBranch
tags = repo.branchtags().keys()
if 'release-branch.r100' in tags:
# NOTE(rsc): This tags.sort is going to get the wrong
# answer when comparing release-branch.r99 with
# release-branch.r100. If we do ten releases a year
# that gives us 4 years before we have to worry about this.
raise util.Abort('tags.sort needs to be fixed for release-branch.r100')
tags.sort()
for t in tags:
if t.startswith('release-branch.'):
releaseBranch = t
#######################################################################
# http://codereview.appspot.com/static/upload.py, heavily edited.

View File

@@ -25,7 +25,7 @@ _go_.$O: $(GOFILES) $(PREREQ)
install: $(TARGDIR)/$(TARG)
$(TARGDIR)/$(TARG): $(TARG)
cp -f $(TARG) $(TARGDIR)
mkdir -p $(TARGDIR) && cp -f $(TARG) $(TARGDIR)
CLEANFILES+=$(TARG) _test _testmain.go

View File

@@ -50,7 +50,7 @@ void
main(int argc, char *argv[])
{
char *p;
int nout, nproc, i, c;
int c;
thechar = '5';
thestring = "arm";
@@ -94,46 +94,10 @@ main(int argc, char *argv[])
print("usage: %ca [-options] file.s\n", thechar);
errorexit();
}
if(argc > 1 && systemtype(Windows)){
print("can't assemble multiple files on windows\n");
if(argc > 1){
print("can't assemble multiple files\n");
errorexit();
}
if(argc > 1 && !systemtype(Windows)) {
nproc = 1;
if(p = getenv("NPROC"))
nproc = atol(p); /* */
c = 0;
nout = 0;
for(;;) {
Waitmsg *w;
while(nout < nproc && argc > 0) {
i = fork();
if(i < 0) {
fprint(2, "fork: %r\n");
errorexit();
}
if(i == 0) {
print("%s:\n", *argv);
if(assemble(*argv))
errorexit();
exits(0);
}
nout++;
argc--;
argv++;
}
w = wait();
if(w == nil) {
if(c)
errorexit();
exits(0);
}
if(w->msg[0])
c++;
nout--;
}
}
if(assemble(argv[0]))
errorexit();
exits(0);
@@ -142,7 +106,7 @@ main(int argc, char *argv[])
int
assemble(char *file)
{
char *ofile, incfile[20], *p;
char *ofile, *p;
int i, of;
ofile = alloc(strlen(file)+3); // +3 for .x\0 (x=thechar)
@@ -167,15 +131,6 @@ assemble(char *file)
} else
outfile = "/dev/null";
}
p = getenv("INCLUDE");
if(p) {
setinclude(p);
} else {
if(systemtype(Plan9)) {
sprint(incfile,"/%s/include", thestring);
setinclude(strdup(incfile));
}
}
of = create(outfile, OWRITE, 0664);
if(of < 0) {

View File

@@ -56,7 +56,7 @@ void
main(int argc, char *argv[])
{
char *p;
int nout, nproc, i, c;
int c;
thechar = '6';
thestring = "amd64";
@@ -96,46 +96,10 @@ main(int argc, char *argv[])
print("usage: %ca [-options] file.s\n", thechar);
errorexit();
}
if(argc > 1 && systemtype(Windows)){
print("can't assemble multiple files on windows\n");
if(argc > 1){
print("can't assemble multiple files\n");
errorexit();
}
if(argc > 1 && !systemtype(Windows)) {
nproc = 1;
if(p = getenv("NPROC"))
nproc = atol(p); /* */
c = 0;
nout = 0;
for(;;) {
Waitmsg *w;
while(nout < nproc && argc > 0) {
i = fork();
if(i < 0) {
fprint(2, "fork: %r\n");
errorexit();
}
if(i == 0) {
print("%s:\n", *argv);
if(assemble(*argv))
errorexit();
exits(0);
}
nout++;
argc--;
argv++;
}
w = wait();
if(w == nil) {
if(c)
errorexit();
exits(0);
}
if(w->msg[0])
c++;
nout--;
}
}
if(assemble(argv[0]))
errorexit();
exits(0);
@@ -144,7 +108,7 @@ main(int argc, char *argv[])
int
assemble(char *file)
{
char *ofile, incfile[20], *p;
char *ofile, *p;
int i, of;
ofile = alloc(strlen(file)+3); // +3 for .x\0 (x=thechar)
@@ -169,15 +133,6 @@ assemble(char *file)
} else
outfile = "/dev/null";
}
p = getenv("INCLUDE");
if(p) {
setinclude(p);
} else {
if(systemtype(Plan9)) {
sprint(incfile,"/%s/include", thestring);
setinclude(strdup(incfile));
}
}
of = create(outfile, OWRITE, 0664);
if(of < 0) {

View File

@@ -56,7 +56,7 @@ void
main(int argc, char *argv[])
{
char *p;
int nout, nproc, i, c;
int c;
thechar = '8';
thestring = "386";
@@ -96,46 +96,10 @@ main(int argc, char *argv[])
print("usage: %ca [-options] file.s\n", thechar);
errorexit();
}
if(argc > 1 && systemtype(Windows)){
print("can't assemble multiple files on windows\n");
if(argc > 1){
print("can't assemble multiple files\n");
errorexit();
}
if(argc > 1 && !systemtype(Windows)) {
nproc = 1;
if(p = getenv("NPROC"))
nproc = atol(p); /* */
c = 0;
nout = 0;
for(;;) {
Waitmsg *w;
while(nout < nproc && argc > 0) {
i = fork();
if(i < 0) {
fprint(2, "fork: %r\n");
errorexit();
}
if(i == 0) {
print("%s:\n", *argv);
if(assemble(*argv))
errorexit();
exits(0);
}
nout++;
argc--;
argv++;
}
w = wait();
if(w == nil) {
if(c)
errorexit();
exits(0);
}
if(w->msg[0])
c++;
nout--;
}
}
if(assemble(argv[0]))
errorexit();
exits(0);
@@ -144,7 +108,7 @@ main(int argc, char *argv[])
int
assemble(char *file)
{
char *ofile, incfile[20], *p;
char *ofile, *p;
int i, of;
ofile = alloc(strlen(file)+3); // +3 for .x\0 (x=thechar)
@@ -169,15 +133,6 @@ assemble(char *file)
} else
outfile = "/dev/null";
}
p = getenv("INCLUDE");
if(p) {
setinclude(p);
} else {
if(systemtype(Plan9)) {
sprint(incfile,"/%s/include", thestring);
setinclude(strdup(incfile));
}
}
of = create(outfile, OWRITE, 0664);
if(of < 0) {

View File

@@ -88,7 +88,7 @@ void
main(int argc, char *argv[])
{
char **defs, *p;
int nproc, nout, i, c, ndef;
int c, ndef;
ensuresymb(NSYMB);
memset(debug, 0, sizeof(debug));
@@ -142,51 +142,10 @@ main(int argc, char *argv[])
print("usage: %cc [-options] files\n", thechar);
errorexit();
}
if(argc > 1 && systemtype(Windows)){
print("can't compile multiple files on windows\n");
if(argc > 1){
print("can't compile multiple files\n");
errorexit();
}
if(argc > 1 && !systemtype(Windows)) {
nproc = 1;
/*
* if we're writing acid to standard output, don't compile
* concurrently, to avoid interleaving output.
*/
if(((!debug['a'] && !debug['q'] && !debug['Q']) || debug['n']) &&
(p = getenv("NPROC")) != nil)
nproc = atol(p); /* */
c = 0;
nout = 0;
for(;;) {
Waitmsg *w;
while(nout < nproc && argc > 0) {
i = fork();
if(i < 0) {
print("cannot create a process\n");
errorexit();
}
if(i == 0) {
fprint(2, "%s:\n", *argv);
if (compile(*argv, defs, ndef))
errorexit();
exits(0);
}
nout++;
argc--;
argv++;
}
w = wait();
if(w == nil) {
if(c)
errorexit();
exits(0);
}
if(w->msg[0])
c++;
nout--;
}
}
if(argc == 0)
c = compile("stdin", defs, ndef);
@@ -201,7 +160,7 @@ main(int argc, char *argv[])
int
compile(char *file, char **defs, int ndef)
{
char *ofile, incfile[20];
char *ofile;
char *p, **av, opt[256];
int i, c, fd[2];
static int first = 1;
@@ -236,15 +195,6 @@ compile(char *file, char **defs, int ndef)
outfile = "/dev/null";
}
if(p = getenv("INCLUDE")) {
setinclude(p);
} else {
if(systemtype(Plan9)) {
sprint(incfile, "/%s/include", thestring);
setinclude(strdup(incfile));
setinclude("/sys/include");
}
}
if (first)
Binit(&diagbuf, 1, OWRITE);
/*

View File

@@ -2880,17 +2880,18 @@ sub FetchSymbols {
my @toask = @pcs;
while (@toask > 0) {
my $n = @toask;
if ($n > 49) { $n = 49; }
# NOTE(rsc): Limiting the number of PCs requested per round
# used to be necessary, but I think it was a bug in
# debug/pprof/symbol's implementation. Leaving here
# in case I am wrong.
# if ($n > 49) { $n = 49; }
my @thisround = @toask[0..$n];
my $t = @toask;
print STDERR "$n $t\n";
@toask = @toask[($n+1)..(@toask-1)];
my $post_data = join("+", sort((map {"0x" . "$_"} @thisround)));
open(POSTFILE, ">$main::tmpfile_sym");
print POSTFILE $post_data;
close(POSTFILE);
print STDERR "SYMBL!\n";
my $url = SymbolPageURL();
$url = ResolveRedirectionForCurl($url);
my $command_line = "$CURL -sd '\@$main::tmpfile_sym' '$url'";

View File

@@ -143,10 +143,18 @@ func (d *compressor) fillWindow(index int) (int, os.Error) {
d.blockStart = math.MaxInt32
}
for i, h := range d.hashHead {
d.hashHead[i] = max(h-wSize, -1)
v := h - wSize
if v < -1 {
v = -1
}
d.hashHead[i] = v
}
for i, h := range d.hashPrev {
d.hashPrev[i] = max(h-wSize, -1)
v := -h - wSize
if v < -1 {
v = -1
}
d.hashPrev[i] = v
}
}
count, err := d.r.Read(d.window[d.windowEnd:])
@@ -177,10 +185,18 @@ func (d *compressor) writeBlock(tokens []token, index int, eof bool) os.Error {
// Try to find a match starting at index whose length is greater than prevSize.
// We only look at chainCount possibilities before giving up.
func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
win := d.window[0 : pos+min(maxMatchLength, lookahead)]
minMatchLook := maxMatchLength
if lookahead < minMatchLook {
minMatchLook = lookahead
}
win := d.window[0 : pos+minMatchLook]
// We quit when we get a match that's at least nice long
nice := min(d.niceMatch, len(win)-pos)
nice := len(win) - pos
if d.niceMatch < nice {
nice = d.niceMatch
}
// If we've got a match that's good enough, only look in 1/4 the chain.
tries := d.maxChainLength
@@ -344,9 +360,12 @@ Loop:
}
prevLength := length
prevOffset := offset
minIndex := max(index-maxOffset, 0)
length = minMatchLength - 1
offset = 0
minIndex := index - maxOffset
if minIndex < 0 {
minIndex = 0
}
if chainHead >= minIndex &&
(isFastDeflate && lookahead > minMatchLength-1 ||

View File

@@ -26,6 +26,7 @@ package pprof
import (
"bufio"
"bytes"
"fmt"
"http"
"os"
@@ -88,10 +89,14 @@ func Profile(w http.ResponseWriter, r *http.Request) {
func Symbol(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
// We have to read the whole POST body before
// writing any output. Buffer the output here.
var buf bytes.Buffer
// We don't know how many symbols we have, but we
// do have symbol information. Pprof only cares whether
// this number is 0 (no symbols available) or > 0.
fmt.Fprintf(w, "num_symbols: 1\n")
fmt.Fprintf(&buf, "num_symbols: 1\n")
var b *bufio.Reader
if r.Method == "POST" {
@@ -109,14 +114,19 @@ func Symbol(w http.ResponseWriter, r *http.Request) {
if pc != 0 {
f := runtime.FuncForPC(uintptr(pc))
if f != nil {
fmt.Fprintf(w, "%#x %s\n", pc, f.Name())
fmt.Fprintf(&buf, "%#x %s\n", pc, f.Name())
}
}
// Wait until here to check for err; the last
// symbol will have an err because it doesn't end in +.
if err != nil {
if err != os.EOF {
fmt.Fprintf(&buf, "reading request: %v\n", err)
}
break
}
}
w.Write(buf.Bytes())
}

View File

@@ -439,9 +439,29 @@ type body struct {
hdr interface{} // non-nil (Response or Request) value means read trailer
r *bufio.Reader // underlying wire-format reader for the trailer
closing bool // is the connection to be closed after reading body?
closed bool
}
// ErrBodyReadAfterClose is returned when reading a Request Body after
// the body has been closed. This typically happens when the body is
// read after an HTTP Handler calls WriteHeader or Write on its
// ResponseWriter.
var ErrBodyReadAfterClose = os.NewError("http: invalid Read on closed request Body")
func (b *body) Read(p []byte) (n int, err os.Error) {
if b.closed {
return 0, ErrBodyReadAfterClose
}
return b.Reader.Read(p)
}
func (b *body) Close() os.Error {
if b.closed {
return nil
}
defer func() {
b.closed = true
}()
if b.hdr == nil && b.closing {
// no trailer and closing the connection next.
// no point in reading to EOF.

View File

@@ -51,6 +51,13 @@ func (p *RGBA) Set(x, y int, c Color) {
p.Pix[y*p.Stride+x] = toRGBAColor(c).(RGBAColor)
}
func (p *RGBA) SetRGBA(x, y int, c RGBAColor) {
if !p.Rect.Contains(Point{x, y}) {
return
}
p.Pix[y*p.Stride+x] = c
}
// Opaque scans the entire image and returns whether or not it is fully opaque.
func (p *RGBA) Opaque() bool {
if p.Rect.Empty() {
@@ -103,6 +110,13 @@ func (p *RGBA64) Set(x, y int, c Color) {
p.Pix[y*p.Stride+x] = toRGBA64Color(c).(RGBA64Color)
}
func (p *RGBA64) SetRGBA64(x, y int, c RGBA64Color) {
if !p.Rect.Contains(Point{x, y}) {
return
}
p.Pix[y*p.Stride+x] = c
}
// Opaque scans the entire image and returns whether or not it is fully opaque.
func (p *RGBA64) Opaque() bool {
if p.Rect.Empty() {
@@ -155,6 +169,13 @@ func (p *NRGBA) Set(x, y int, c Color) {
p.Pix[y*p.Stride+x] = toNRGBAColor(c).(NRGBAColor)
}
func (p *NRGBA) SetNRGBA(x, y int, c NRGBAColor) {
if !p.Rect.Contains(Point{x, y}) {
return
}
p.Pix[y*p.Stride+x] = c
}
// Opaque scans the entire image and returns whether or not it is fully opaque.
func (p *NRGBA) Opaque() bool {
if p.Rect.Empty() {
@@ -207,6 +228,13 @@ func (p *NRGBA64) Set(x, y int, c Color) {
p.Pix[y*p.Stride+x] = toNRGBA64Color(c).(NRGBA64Color)
}
func (p *NRGBA64) SetNRGBA64(x, y int, c NRGBA64Color) {
if !p.Rect.Contains(Point{x, y}) {
return
}
p.Pix[y*p.Stride+x] = c
}
// Opaque scans the entire image and returns whether or not it is fully opaque.
func (p *NRGBA64) Opaque() bool {
if p.Rect.Empty() {
@@ -252,13 +280,20 @@ func (p *Alpha) At(x, y int) Color {
return p.Pix[y*p.Stride+x]
}
func (p *Alpha) Set(x, y int, c Color) {
func (p *Alpha) Set(x, y int, c AlphaColor) {
if !p.Rect.Contains(Point{x, y}) {
return
}
p.Pix[y*p.Stride+x] = toAlphaColor(c).(AlphaColor)
}
func (p *Alpha) SetAlpha(x, y int, c AlphaColor) {
if !p.Rect.Contains(Point{x, y}) {
return
}
p.Pix[y*p.Stride+x] = c
}
// Opaque scans the entire image and returns whether or not it is fully opaque.
func (p *Alpha) Opaque() bool {
if p.Rect.Empty() {
@@ -311,6 +346,13 @@ func (p *Alpha16) Set(x, y int, c Color) {
p.Pix[y*p.Stride+x] = toAlpha16Color(c).(Alpha16Color)
}
func (p *Alpha16) SetAlpha16(x, y int, c Alpha16Color) {
if !p.Rect.Contains(Point{x, y}) {
return
}
p.Pix[y*p.Stride+x] = c
}
// Opaque scans the entire image and returns whether or not it is fully opaque.
func (p *Alpha16) Opaque() bool {
if p.Rect.Empty() {
@@ -363,6 +405,13 @@ func (p *Gray) Set(x, y int, c Color) {
p.Pix[y*p.Stride+x] = toGrayColor(c).(GrayColor)
}
func (p *Gray) SetGray(x, y int, c GrayColor) {
if !p.Rect.Contains(Point{x, y}) {
return
}
p.Pix[y*p.Stride+x] = c
}
// Opaque scans the entire image and returns whether or not it is fully opaque.
func (p *Gray) Opaque() bool {
return true
@@ -401,6 +450,13 @@ func (p *Gray16) Set(x, y int, c Color) {
p.Pix[y*p.Stride+x] = toGray16Color(c).(Gray16Color)
}
func (p *Gray16) SetGray16(x, y int, c Gray16Color) {
if !p.Rect.Contains(Point{x, y}) {
return
}
p.Pix[y*p.Stride+x] = c
}
// Opaque scans the entire image and returns whether or not it is fully opaque.
func (p *Gray16) Opaque() bool {
return true

View File

@@ -391,6 +391,31 @@ func toYCbCr(m image.Image, p image.Point, yBlock, cbBlock, crBlock *block) {
}
}
// rgbaToYCbCr is a specialized version of toYCbCr for image.RGBA images.
func rgbaToYCbCr(m *image.RGBA, p image.Point, yBlock, cbBlock, crBlock *block) {
b := m.Bounds()
xmax := b.Max.X - 1
ymax := b.Max.Y - 1
for j := 0; j < 8; j++ {
sj := p.Y + j
if sj > ymax {
sj = ymax
}
yoff := sj * m.Stride
for i := 0; i < 8; i++ {
sx := p.X + i
if sx > xmax {
sx = xmax
}
col := &m.Pix[yoff+sx]
yy, cb, cr := ycbcr.RGBToYCbCr(col.R, col.G, col.B)
yBlock[8*j+i] = int(yy)
cbBlock[8*j+i] = int(cb)
crBlock[8*j+i] = int(cr)
}
}
}
// scale scales the 16x16 region represented by the 4 src blocks to the 8x8
// dst block.
func scale(dst *block, src *[4]block) {
@@ -431,13 +456,18 @@ func (e *encoder) writeSOS(m image.Image) {
prevDCY, prevDCCb, prevDCCr int
)
bounds := m.Bounds()
rgba, _ := m.(*image.RGBA)
for y := bounds.Min.Y; y < bounds.Max.Y; y += 16 {
for x := bounds.Min.X; x < bounds.Max.X; x += 16 {
for i := 0; i < 4; i++ {
xOff := (i & 1) * 8
yOff := (i & 2) * 4
p := image.Point{x + xOff, y + yOff}
toYCbCr(m, p, &yBlock, &cbBlock[i], &crBlock[i])
if rgba != nil {
rgbaToYCbCr(rgba, p, &yBlock, &cbBlock[i], &crBlock[i])
} else {
toYCbCr(m, p, &yBlock, &cbBlock[i], &crBlock[i])
}
prevDCY = e.writeBlock(&yBlock, 0, prevDCY)
}
scale(&cBlock, &cbBlock)

View File

@@ -8,6 +8,8 @@ import (
"bytes"
"image"
"image/png"
"io/ioutil"
"rand"
"os"
"testing"
)
@@ -85,3 +87,29 @@ func TestWriter(t *testing.T) {
}
}
}
func BenchmarkEncodeRGBOpaque(b *testing.B) {
b.StopTimer()
img := image.NewRGBA(640, 480)
// Set all pixels to 0xFF alpha to force opaque mode.
bo := img.Bounds()
rnd := rand.New(rand.NewSource(123))
for y := bo.Min.Y; y < bo.Max.Y; y++ {
for x := bo.Min.X; x < bo.Max.X; x++ {
img.Set(x, y, image.RGBAColor{
uint8(rnd.Intn(256)),
uint8(rnd.Intn(256)),
uint8(rnd.Intn(256)),
255})
}
}
if !img.Opaque() {
panic("expected image to be opaque")
}
b.SetBytes(640 * 480 * 4)
b.StartTimer()
options := &Options{Quality: 90}
for i := 0; i < b.N; i++ {
Encode(ioutil.Discard, img, options)
}
}

View File

@@ -378,7 +378,7 @@ func (d *decoder) idatReader(idat io.Reader) (image.Image, os.Error) {
for x := 0; x < d.width; x += 8 {
b := cdat[x/8]
for x2 := 0; x2 < 8 && x+x2 < d.width; x2++ {
gray.Set(x+x2, y, image.GrayColor{(b >> 7) * 0xff})
gray.SetGray(x+x2, y, image.GrayColor{(b >> 7) * 0xff})
b <<= 1
}
}
@@ -386,7 +386,7 @@ func (d *decoder) idatReader(idat io.Reader) (image.Image, os.Error) {
for x := 0; x < d.width; x += 4 {
b := cdat[x/4]
for x2 := 0; x2 < 4 && x+x2 < d.width; x2++ {
gray.Set(x+x2, y, image.GrayColor{(b >> 6) * 0x55})
gray.SetGray(x+x2, y, image.GrayColor{(b >> 6) * 0x55})
b <<= 2
}
}
@@ -394,22 +394,22 @@ func (d *decoder) idatReader(idat io.Reader) (image.Image, os.Error) {
for x := 0; x < d.width; x += 2 {
b := cdat[x/2]
for x2 := 0; x2 < 2 && x+x2 < d.width; x2++ {
gray.Set(x+x2, y, image.GrayColor{(b >> 4) * 0x11})
gray.SetGray(x+x2, y, image.GrayColor{(b >> 4) * 0x11})
b <<= 4
}
}
case cbG8:
for x := 0; x < d.width; x++ {
gray.Set(x, y, image.GrayColor{cdat[x]})
gray.SetGray(x, y, image.GrayColor{cdat[x]})
}
case cbGA8:
for x := 0; x < d.width; x++ {
ycol := cdat[2*x+0]
nrgba.Set(x, y, image.NRGBAColor{ycol, ycol, ycol, cdat[2*x+1]})
nrgba.SetNRGBA(x, y, image.NRGBAColor{ycol, ycol, ycol, cdat[2*x+1]})
}
case cbTC8:
for x := 0; x < d.width; x++ {
rgba.Set(x, y, image.RGBAColor{cdat[3*x+0], cdat[3*x+1], cdat[3*x+2], 0xff})
rgba.SetRGBA(x, y, image.RGBAColor{cdat[3*x+0], cdat[3*x+1], cdat[3*x+2], 0xff})
}
case cbP1:
for x := 0; x < d.width; x += 8 {
@@ -456,25 +456,25 @@ func (d *decoder) idatReader(idat io.Reader) (image.Image, os.Error) {
}
case cbTCA8:
for x := 0; x < d.width; x++ {
nrgba.Set(x, y, image.NRGBAColor{cdat[4*x+0], cdat[4*x+1], cdat[4*x+2], cdat[4*x+3]})
nrgba.SetNRGBA(x, y, image.NRGBAColor{cdat[4*x+0], cdat[4*x+1], cdat[4*x+2], cdat[4*x+3]})
}
case cbG16:
for x := 0; x < d.width; x++ {
ycol := uint16(cdat[2*x+0])<<8 | uint16(cdat[2*x+1])
gray16.Set(x, y, image.Gray16Color{ycol})
gray16.SetGray16(x, y, image.Gray16Color{ycol})
}
case cbGA16:
for x := 0; x < d.width; x++ {
ycol := uint16(cdat[4*x+0])<<8 | uint16(cdat[4*x+1])
acol := uint16(cdat[4*x+2])<<8 | uint16(cdat[4*x+3])
nrgba64.Set(x, y, image.NRGBA64Color{ycol, ycol, ycol, acol})
nrgba64.SetNRGBA64(x, y, image.NRGBA64Color{ycol, ycol, ycol, acol})
}
case cbTC16:
for x := 0; x < d.width; x++ {
rcol := uint16(cdat[6*x+0])<<8 | uint16(cdat[6*x+1])
gcol := uint16(cdat[6*x+2])<<8 | uint16(cdat[6*x+3])
bcol := uint16(cdat[6*x+4])<<8 | uint16(cdat[6*x+5])
rgba64.Set(x, y, image.RGBA64Color{rcol, gcol, bcol, 0xffff})
rgba64.SetRGBA64(x, y, image.RGBA64Color{rcol, gcol, bcol, 0xffff})
}
case cbTCA16:
for x := 0; x < d.width; x++ {
@@ -482,7 +482,7 @@ func (d *decoder) idatReader(idat io.Reader) (image.Image, os.Error) {
gcol := uint16(cdat[8*x+2])<<8 | uint16(cdat[8*x+3])
bcol := uint16(cdat[8*x+4])<<8 | uint16(cdat[8*x+5])
acol := uint16(cdat[8*x+6])<<8 | uint16(cdat[8*x+7])
nrgba64.Set(x, y, image.NRGBA64Color{rcol, gcol, bcol, acol})
nrgba64.SetNRGBA64(x, y, image.NRGBA64Color{rcol, gcol, bcol, acol})
}
}

View File

@@ -263,7 +263,12 @@ func writeImage(w io.Writer, m image.Image, cb int) os.Error {
defer zw.Close()
bpp := 0 // Bytes per pixel.
// Used by fast paths for common image types
var paletted *image.Paletted
var rgba *image.RGBA
rgba, _ = m.(*image.RGBA)
switch cb {
case cbG8:
bpp = 1
@@ -303,12 +308,24 @@ func writeImage(w io.Writer, m image.Image, cb int) os.Error {
cr[0][x+1] = c.Y
}
case cbTC8:
for x := b.Min.X; x < b.Max.X; x++ {
// We have previously verified that the alpha value is fully opaque.
r, g, b, _ := m.At(x, y).RGBA()
cr[0][3*x+1] = uint8(r >> 8)
cr[0][3*x+2] = uint8(g >> 8)
cr[0][3*x+3] = uint8(b >> 8)
// We have previously verified that the alpha value is fully opaque.
cr0 := cr[0]
if rgba != nil {
yoff := y * rgba.Stride
xoff := 3*b.Min.X + 1
for _, color := range rgba.Pix[yoff+b.Min.X : yoff+b.Max.X] {
cr0[xoff] = color.R
cr0[xoff+1] = color.G
cr0[xoff+2] = color.B
xoff += 3
}
} else {
for x := b.Min.X; x < b.Max.X; x++ {
r, g, b, _ := m.At(x, y).RGBA()
cr0[3*x+1] = uint8(r >> 8)
cr0[3*x+2] = uint8(g >> 8)
cr0[3*x+3] = uint8(b >> 8)
}
}
case cbP8:
rowOffset := y * paletted.Stride

View File

@@ -5,10 +5,10 @@
package png
import (
"bytes"
"fmt"
"image"
"io"
"io/ioutil"
"os"
"testing"
)
@@ -81,10 +81,42 @@ func BenchmarkEncodePaletted(b *testing.B) {
image.RGBAColor{0, 0, 0, 255},
image.RGBAColor{255, 255, 255, 255},
})
b.SetBytes(640 * 480 * 1)
b.StartTimer()
buffer := new(bytes.Buffer)
for i := 0; i < b.N; i++ {
buffer.Reset()
Encode(buffer, img)
Encode(ioutil.Discard, img)
}
}
func BenchmarkEncodeRGBOpaque(b *testing.B) {
b.StopTimer()
img := image.NewRGBA(640, 480)
// Set all pixels to 0xFF alpha to force opaque mode.
bo := img.Bounds()
for y := bo.Min.Y; y < bo.Max.Y; y++ {
for x := bo.Min.X; x < bo.Max.X; x++ {
img.Set(x, y, image.RGBAColor{0, 0, 0, 255})
}
}
if !img.Opaque() {
panic("expected image to be opaque")
}
b.SetBytes(640 * 480 * 4)
b.StartTimer()
for i := 0; i < b.N; i++ {
Encode(ioutil.Discard, img)
}
}
func BenchmarkEncodeRGBA(b *testing.B) {
b.StopTimer()
img := image.NewRGBA(640, 480)
if img.Opaque() {
panic("expected image to not be opaque")
}
b.SetBytes(640 * 480 * 4)
b.StartTimer()
for i := 0; i < b.N; i++ {
Encode(ioutil.Discard, img)
}
}

View File

@@ -15,13 +15,13 @@ package multipart
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"mime"
"net/textproto"
"os"
"regexp"
"strings"
)
var headerRegexp *regexp.Regexp = regexp.MustCompile("^([a-zA-Z0-9\\-]+): *([^\r\n]+)")
@@ -79,25 +79,28 @@ func (p *Part) FormName() string {
// NewReader creates a new multipart Reader reading from r using the
// given MIME boundary.
func NewReader(reader io.Reader, boundary string) Reader {
b := []byte("\r\n--" + boundary + "--")
return &multiReader{
boundary: boundary,
dashBoundary: "--" + boundary,
endLine: "--" + boundary + "--",
bufReader: bufio.NewReader(reader),
bufReader: bufio.NewReader(reader),
nlDashBoundary: b[:len(b)-2],
dashBoundaryDash: b[2:],
dashBoundary: b[2 : len(b)-2],
}
}
// Implementation ....
func newPart(mr *multiReader) (bp *Part, err os.Error) {
bp = new(Part)
bp.Header = make(map[string][]string)
bp.mr = mr
bp.buffer = new(bytes.Buffer)
if err = bp.populateHeaders(); err != nil {
bp = nil
func newPart(mr *multiReader) (*Part, os.Error) {
bp := &Part{
Header: make(map[string][]string),
mr: mr,
buffer: new(bytes.Buffer),
}
return
if err := bp.populateHeaders(); err != nil {
return nil, err
}
return bp, nil
}
func (bp *Part) populateHeaders() os.Error {
@@ -122,44 +125,49 @@ func (bp *Part) populateHeaders() os.Error {
// Read reads the body of a part, after its headers and before the
// next part (if any) begins.
func (bp *Part) Read(p []byte) (n int, err os.Error) {
for {
if bp.buffer.Len() >= len(p) {
// Internal buffer of unconsumed data is large enough for
// the read request. No need to parse more at the moment.
break
}
if !bp.mr.ensureBufferedLine() {
return 0, io.ErrUnexpectedEOF
}
if bp.mr.bufferedLineIsBoundary() {
// Don't consume this line
break
}
// Write all of this line, except the final CRLF
s := *bp.mr.bufferedLine
if strings.HasSuffix(s, "\r\n") {
bp.mr.consumeLine()
if !bp.mr.ensureBufferedLine() {
return 0, io.ErrUnexpectedEOF
}
if bp.mr.bufferedLineIsBoundary() {
// The final \r\n isn't ours. It logically belongs
// to the boundary line which follows.
bp.buffer.WriteString(s[0 : len(s)-2])
} else {
bp.buffer.WriteString(s)
}
break
}
if strings.HasSuffix(s, "\n") {
bp.buffer.WriteString(s)
bp.mr.consumeLine()
continue
}
return 0, os.NewError("multipart parse error during Read; unexpected line: " + s)
if bp.buffer.Len() >= len(p) {
// Internal buffer of unconsumed data is large enough for
// the read request. No need to parse more at the moment.
return bp.buffer.Read(p)
}
return bp.buffer.Read(p)
peek, err := bp.mr.bufReader.Peek(4096) // TODO(bradfitz): add buffer size accessor
unexpectedEof := err == os.EOF
if err != nil && !unexpectedEof {
return 0, fmt.Errorf("multipart: Part Read: %v", err)
}
if peek == nil {
panic("nil peek buf")
}
// Search the peek buffer for "\r\n--boundary". If found,
// consume everything up to the boundary. If not, consume only
// as much of the peek buffer as cannot hold the boundary
// string.
nCopy := 0
foundBoundary := false
if idx := bytes.Index(peek, bp.mr.nlDashBoundary); idx != -1 {
nCopy = idx
foundBoundary = true
} else if safeCount := len(peek) - len(bp.mr.nlDashBoundary); safeCount > 0 {
nCopy = safeCount
} else if unexpectedEof {
// If we've run out of peek buffer and the boundary
// wasn't found (and can't possibly fit), we must have
// hit the end of the file unexpectedly.
return 0, io.ErrUnexpectedEOF
}
if nCopy > 0 {
if _, err := io.Copyn(bp.buffer, bp.mr.bufReader, int64(nCopy)); err != nil {
return 0, err
}
}
n, err = bp.buffer.Read(p)
if err == os.EOF && !foundBoundary {
// If the boundary hasn't been reached there's more to
// read, so don't pass through an EOF from the buffer
err = nil
}
return
}
func (bp *Part) Close() os.Error {
@@ -168,46 +176,12 @@ func (bp *Part) Close() os.Error {
}
type multiReader struct {
boundary string
dashBoundary string // --boundary
endLine string // --boundary--
bufReader *bufio.Reader
bufferedLine *string
bufReader *bufio.Reader
currentPart *Part
partsRead int
}
func (mr *multiReader) eof() bool {
return mr.bufferedLine == nil &&
!mr.readLine()
}
func (mr *multiReader) readLine() bool {
lineBytes, err := mr.bufReader.ReadSlice('\n')
if err != nil {
// TODO: care about err being EOF or not?
return false
}
line := string(lineBytes)
mr.bufferedLine = &line
return true
}
func (mr *multiReader) bufferedLineIsBoundary() bool {
return strings.HasPrefix(*mr.bufferedLine, mr.dashBoundary)
}
func (mr *multiReader) ensureBufferedLine() bool {
if mr.bufferedLine == nil {
return mr.readLine()
}
return true
}
func (mr *multiReader) consumeLine() {
mr.bufferedLine = nil
nlDashBoundary, dashBoundaryDash, dashBoundary []byte
}
func (mr *multiReader) NextPart() (*Part, os.Error) {
@@ -215,13 +189,14 @@ func (mr *multiReader) NextPart() (*Part, os.Error) {
mr.currentPart.Close()
}
expectNewPart := false
for {
if mr.eof() {
return nil, io.ErrUnexpectedEOF
line, err := mr.bufReader.ReadSlice('\n')
if err != nil {
return nil, fmt.Errorf("multipart: NextPart: %v", err)
}
if isBoundaryDelimiterLine(*mr.bufferedLine, mr.dashBoundary) {
mr.consumeLine()
if mr.isBoundaryDelimiterLine(line) {
mr.partsRead++
bp, err := newPart(mr)
if err != nil {
@@ -231,55 +206,67 @@ func (mr *multiReader) NextPart() (*Part, os.Error) {
return bp, nil
}
if hasPrefixThenNewline(*mr.bufferedLine, mr.endLine) {
mr.consumeLine()
if hasPrefixThenNewline(line, mr.dashBoundaryDash) {
// Expected EOF (no error)
// TODO(bradfitz): should return an os.EOF error here, not using nil for errors
return nil, nil
}
if expectNewPart {
return nil, fmt.Errorf("multipart: expecting a new Part; got line %q", string(line))
}
if mr.partsRead == 0 {
// skip line
mr.consumeLine()
continue
}
return nil, os.NewError("Unexpected line in Next().")
if bytes.Equal(line, []byte("\r\n")) {
// Consume the "\r\n" separator between the
// body of the previous part and the boundary
// line we now expect will follow. (either a
// new part or the end boundary)
expectNewPart = true
continue
}
return nil, fmt.Errorf("multipart: unexpected line in Next(): %q", line)
}
panic("unreachable")
}
func isBoundaryDelimiterLine(line, dashPrefix string) bool {
func (mr *multiReader) isBoundaryDelimiterLine(line []byte) bool {
// http://tools.ietf.org/html/rfc2046#section-5.1
// The boundary delimiter line is then defined as a line
// consisting entirely of two hyphen characters ("-",
// decimal value 45) followed by the boundary parameter
// value from the Content-Type header field, optional linear
// whitespace, and a terminating CRLF.
if !strings.HasPrefix(line, dashPrefix) {
if !bytes.HasPrefix(line, mr.dashBoundary) {
return false
}
if strings.HasSuffix(line, "\r\n") {
return onlyHorizontalWhitespace(line[len(dashPrefix) : len(line)-2])
if bytes.HasSuffix(line, []byte("\r\n")) {
return onlyHorizontalWhitespace(line[len(mr.dashBoundary) : len(line)-2])
}
// Violate the spec and also support newlines without the
// carriage return...
if strings.HasSuffix(line, "\n") {
return onlyHorizontalWhitespace(line[len(dashPrefix) : len(line)-1])
if bytes.HasSuffix(line, []byte("\n")) {
return onlyHorizontalWhitespace(line[len(mr.dashBoundary) : len(line)-1])
}
return false
}
func onlyHorizontalWhitespace(s string) bool {
for i := 0; i < len(s); i++ {
if s[i] != ' ' && s[i] != '\t' {
func onlyHorizontalWhitespace(s []byte) bool {
for _, b := range s {
if b != ' ' && b != '\t' {
return false
}
}
return true
}
func hasPrefixThenNewline(s, prefix string) bool {
return strings.HasPrefix(s, prefix) &&
(len(s) == len(prefix)+1 && strings.HasSuffix(s, "\n") ||
len(s) == len(prefix)+2 && strings.HasSuffix(s, "\r\n"))
func hasPrefixThenNewline(s, prefix []byte) bool {
return bytes.HasPrefix(s, prefix) &&
(len(s) == len(prefix)+1 && s[len(s)-1] == '\n' ||
len(s) == len(prefix)+2 && bytes.HasSuffix(s, []byte("\r\n")))
}

View File

@@ -8,38 +8,37 @@ import (
"bytes"
"fmt"
"io"
"io/ioutil"
"json"
"os"
"regexp"
"strings"
"testing"
)
func TestHorizontalWhitespace(t *testing.T) {
if !onlyHorizontalWhitespace(" \t") {
if !onlyHorizontalWhitespace([]byte(" \t")) {
t.Error("expected pass")
}
if onlyHorizontalWhitespace("foo bar") {
if onlyHorizontalWhitespace([]byte("foo bar")) {
t.Error("expected failure")
}
}
func TestBoundaryLine(t *testing.T) {
boundary := "myBoundary"
prefix := "--" + boundary
if !isBoundaryDelimiterLine("--myBoundary\r\n", prefix) {
mr := NewReader(strings.NewReader(""), "myBoundary").(*multiReader)
if !mr.isBoundaryDelimiterLine([]byte("--myBoundary\r\n")) {
t.Error("expected")
}
if !isBoundaryDelimiterLine("--myBoundary \r\n", prefix) {
if !mr.isBoundaryDelimiterLine([]byte("--myBoundary \r\n")) {
t.Error("expected")
}
if !isBoundaryDelimiterLine("--myBoundary \n", prefix) {
if !mr.isBoundaryDelimiterLine([]byte("--myBoundary \n")) {
t.Error("expected")
}
if isBoundaryDelimiterLine("--myBoundary bogus \n", prefix) {
if mr.isBoundaryDelimiterLine([]byte("--myBoundary bogus \n")) {
t.Error("expected fail")
}
if isBoundaryDelimiterLine("--myBoundary bogus--", prefix) {
if mr.isBoundaryDelimiterLine([]byte("--myBoundary bogus--")) {
t.Error("expected fail")
}
}
@@ -79,7 +78,9 @@ func TestFormName(t *testing.T) {
}
}
func TestMultipart(t *testing.T) {
var longLine = strings.Repeat("\n\n\r\r\r\n\r\000", (1<<20)/8)
func testMultipartBody() string {
testBody := `
This is a multi-part message. This line is ignored.
--MyBoundary
@@ -90,6 +91,10 @@ foo-bar: baz
My value
The end.
--MyBoundary
name: bigsection
[longline]
--MyBoundary
Header1: value1b
HEADER2: value2b
foo-bar: bazb
@@ -102,11 +107,26 @@ Line 3 ends in a newline, but just one.
never read data
--MyBoundary--
`
testBody = regexp.MustCompile("\n").ReplaceAllString(testBody, "\r\n")
bodyReader := strings.NewReader(testBody)
reader := NewReader(bodyReader, "MyBoundary")
useless trailer
`
testBody = strings.Replace(testBody, "\n", "\r\n", -1)
return strings.Replace(testBody, "[longline]", longLine, 1)
}
func TestMultipart(t *testing.T) {
bodyReader := strings.NewReader(testMultipartBody())
testMultipart(t, bodyReader)
}
func TestMultipartSlowInput(t *testing.T) {
bodyReader := strings.NewReader(testMultipartBody())
testMultipart(t, &slowReader{bodyReader})
}
func testMultipart(t *testing.T, r io.Reader) {
reader := NewReader(r, "MyBoundary")
buf := new(bytes.Buffer)
// Part1
@@ -125,38 +145,64 @@ never read data
t.Error("Expected Foo-Bar: baz")
}
buf.Reset()
io.Copy(buf, part)
if _, err := io.Copy(buf, part); err != nil {
t.Errorf("part 1 copy: %v", err)
}
expectEq(t, "My value\r\nThe end.",
buf.String(), "Value of first part")
// Part2
part, err = reader.NextPart()
if err != nil {
t.Fatalf("Expected part2; got: %v", err)
return
}
if e, g := "bigsection", part.Header.Get("name"); e != g {
t.Errorf("part2's name header: expected %q, got %q", e, g)
}
buf.Reset()
if _, err := io.Copy(buf, part); err != nil {
t.Errorf("part 2 copy: %v", err)
}
s := buf.String()
if len(s) != len(longLine) {
t.Errorf("part2 body expected long line of length %d; got length %d",
len(longLine), len(s))
}
if s != longLine {
t.Errorf("part2 long body didn't match")
}
// Part3
part, err = reader.NextPart()
if part == nil || err != nil {
t.Error("Expected part2")
t.Error("Expected part3")
return
}
if part.Header.Get("foo-bar") != "bazb" {
t.Error("Expected foo-bar: bazb")
}
buf.Reset()
io.Copy(buf, part)
if _, err := io.Copy(buf, part); err != nil {
t.Errorf("part 3 copy: %v", err)
}
expectEq(t, "Line 1\r\nLine 2\r\nLine 3 ends in a newline, but just one.\r\n",
buf.String(), "Value of second part")
buf.String(), "body of part 3")
// Part3
// Part4
part, err = reader.NextPart()
if part == nil || err != nil {
t.Error("Expected part3 without errors")
t.Error("Expected part 4 without errors")
return
}
// Non-existent part4
// Non-existent part5
part, err = reader.NextPart()
if part != nil {
t.Error("Didn't expect a third part.")
t.Error("Didn't expect a fifth part.")
}
if err != nil {
t.Errorf("Unexpected error getting third part: %v", err)
t.Errorf("Unexpected error getting fifth part: %v", err)
}
}
@@ -237,3 +283,36 @@ func TestLineLimit(t *testing.T) {
t.Errorf("expected to read < %d bytes; read %d", maxReadThreshold, mr.n)
}
}
// TestMultipartTruncated verifies that draining a part whose input ends
// before any closing boundary appears surfaces io.ErrUnexpectedEOF,
// rather than a clean EOF that would hide the truncation.
func TestMultipartTruncated(t *testing.T) {
	testBody := `
This is a multi-part message. This line is ignored.
--MyBoundary
foo-bar: baz
Oh no, premature EOF!
`
	// The multipart wire format uses CRLF line endings.
	body := strings.Replace(testBody, "\n", "\r\n", -1)
	bodyReader := strings.NewReader(body)
	r := NewReader(bodyReader, "MyBoundary")
	part, err := r.NextPart()
	if err != nil {
		t.Fatalf("didn't get a part")
	}
	// Reading the truncated body must fail with exactly ErrUnexpectedEOF.
	_, err = io.Copy(ioutil.Discard, part)
	if err != io.ErrUnexpectedEOF {
		t.Fatalf("expected error io.ErrUnexpectedEOF; got %v", err)
	}
}
// slowReader wraps an io.Reader and delivers at most one byte per Read
// call, exercising code paths that must cope with fragmented input.
type slowReader struct {
	r io.Reader // underlying source
}
// Read forwards to the wrapped reader but caps each request at a single
// byte, simulating a slow or highly fragmented stream.
func (s *slowReader) Read(p []byte) (int, os.Error) {
	if len(p) > 0 {
		p = p[:1]
	}
	return s.r.Read(p)
}

View File

@@ -182,7 +182,9 @@ var valueTests = []pair{
}),
"struct { c chan *int32; d float32 }{chan *int32, 0}",
},
{new(struct{ c func(chan *integer, *int8) }),
{new(struct {
c func(chan *integer, *int8)
}),
"struct { c func(chan *reflect_test.integer, *int8) }{func(chan *reflect_test.integer, *int8)(0)}",
},
{new(struct {
@@ -732,6 +734,24 @@ func TestDeepEqualComplexStructInequality(t *testing.T) {
}
}
// UnexpT holds a map in an unexported field, used to check that
// DeepEqual can descend into unexported fields.
type UnexpT struct {
	m map[int]int // compared via DeepEqual despite being unexported
}
// TestDeepEqualUnexportedMap checks that DeepEqual can compare maps held
// in unexported struct fields: equal contents compare true, differing
// contents compare false.
func TestDeepEqualUnexportedMap(t *testing.T) {
	// Check that DeepEqual can look at unexported fields.
	equalA := UnexpT{map[int]int{1: 2}}
	equalB := UnexpT{map[int]int{1: 2}}
	if !DeepEqual(&equalA, &equalB) {
		t.Error("DeepEqual(x1, x2) = false, want true")
	}
	different := UnexpT{map[int]int{2: 3}}
	if DeepEqual(&equalA, &different) {
		t.Error("DeepEqual(x1, y1) = true, want false")
	}
}
func check2ndField(x interface{}, offs uintptr, t *testing.T) {
s := ValueOf(x)

View File

@@ -958,14 +958,19 @@ func (v Value) MapIndex(key Value) Value {
iv.mustBe(Map)
typ := iv.typ.toType()
// Do not require ikey to be exported, so that DeepEqual
// and other programs can use all the keys returned by
// MapKeys as arguments to MapIndex. If either the map
// or the key is unexported, though, the result will be
// considered unexported.
ikey := key.internal()
ikey.mustBeExported()
ikey = convertForAssignment("reflect.Value.MapIndex", nil, typ.Key(), ikey)
if iv.word == 0 {
return Value{}
}
flag := iv.flag & flagRO
flag := (iv.flag | ikey.flag) & flagRO
elemType := typ.Elem()
elemWord, ok := mapaccess(iv.word, ikey.word)
if !ok {

View File

@@ -258,11 +258,22 @@ TEXT cas<>(SB),7,$0
TEXT runtime·cas(SB),7,$0
MOVW valptr+0(FP), R2
MOVW old+4(FP), R0
casagain:
MOVW new+8(FP), R1
BL cas<>(SB)
MOVW $0, R0
MOVW.CS $1, R0
BCC cascheck
MOVW $1, R0
RET
cascheck:
// Kernel lies; double-check.
MOVW valptr+0(FP), R2
MOVW old+4(FP), R0
MOVW 0(R2), R3
CMP R0, R3
BEQ casagain
MOVW $0, R0
RET
TEXT runtime·casp(SB),7,$0
B runtime·cas(SB)

View File

@@ -13,6 +13,12 @@
// LR = return address
// The function returns with CS true if the swap happened.
// http://lxr.linux.no/linux+v2.6.37.2/arch/arm/kernel/entry-armv.S#L850
// On older kernels (before 2.6.24) the function can incorrectly
// report a conflict, so we have to double-check the compare ourselves
// and retry if necessary.
//
// http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=b49c0f24cf6744a3f4fd09289fe7cade349dead5
//
TEXT cas<>(SB),7,$0
MOVW $0xffff0fc0, PC
@@ -23,12 +29,23 @@ TEXT ·CompareAndSwapInt32(SB),7,$0
TEXT ·CompareAndSwapUint32(SB),7,$0
MOVW valptr+0(FP), R2
MOVW old+4(FP), R0
casagain:
MOVW new+8(FP), R1
BL cas<>(SB)
MOVW $0, R0
MOVW.CS $1, R0
BCC cascheck
MOVW $1, R0
casret:
MOVW R0, ret+12(FP)
RET
cascheck:
// Kernel lies; double-check.
MOVW valptr+0(FP), R2
MOVW old+4(FP), R0
MOVW 0(R2), R3
CMP R0, R3
BEQ casagain
MOVW $0, R0
B casret
TEXT ·CompareAndSwapUintptr(SB),7,$0
B ·CompareAndSwapUint32(SB)

View File

@@ -220,13 +220,10 @@ func (p *Parser) unmarshal(val reflect.Value, start *StartElement) os.Error {
}
if pv := val; pv.Kind() == reflect.Ptr {
if pv.Pointer() == 0 {
zv := reflect.Zero(pv.Type().Elem())
pv.Set(zv.Addr())
val = zv
} else {
val = pv.Elem()
if pv.IsNil() {
pv.Set(reflect.New(pv.Type().Elem()))
}
val = pv.Elem()
}
var (

View File

@@ -329,47 +329,51 @@ func TestSyntax(t *testing.T) {
}
type allScalars struct {
True1 bool
True2 bool
False1 bool
False2 bool
Int int
Int8 int8
Int16 int16
Int32 int32
Int64 int64
Uint int
Uint8 uint8
Uint16 uint16
Uint32 uint32
Uint64 uint64
Uintptr uintptr
Float32 float32
Float64 float64
String string
True1 bool
True2 bool
False1 bool
False2 bool
Int int
Int8 int8
Int16 int16
Int32 int32
Int64 int64
Uint int
Uint8 uint8
Uint16 uint16
Uint32 uint32
Uint64 uint64
Uintptr uintptr
Float32 float32
Float64 float64
String string
PtrString *string
}
var all = allScalars{
True1: true,
True2: true,
False1: false,
False2: false,
Int: 1,
Int8: -2,
Int16: 3,
Int32: -4,
Int64: 5,
Uint: 6,
Uint8: 7,
Uint16: 8,
Uint32: 9,
Uint64: 10,
Uintptr: 11,
Float32: 13.0,
Float64: 14.0,
String: "15",
True1: true,
True2: true,
False1: false,
False2: false,
Int: 1,
Int8: -2,
Int16: 3,
Int32: -4,
Int64: 5,
Uint: 6,
Uint8: 7,
Uint16: 8,
Uint32: 9,
Uint64: 10,
Uintptr: 11,
Float32: 13.0,
Float64: 14.0,
String: "15",
PtrString: &sixteen,
}
var sixteen = "16"
const testScalarsInput = `<allscalars>
<true1>true</true1>
<true2>1</true2>
@@ -390,6 +394,7 @@ const testScalarsInput = `<allscalars>
<float32>13.0</float32>
<float64>14.0</float64>
<string>15</string>
<ptrstring>16</ptrstring>
</allscalars>`
func TestAllScalars(t *testing.T) {
@@ -401,7 +406,7 @@ func TestAllScalars(t *testing.T) {
t.Fatal(err)
}
if !reflect.DeepEqual(a, all) {
t.Errorf("expected %+v got %+v", all, a)
t.Errorf("have %+v want %+v", a, all)
}
}