http://python-data.dr-chuck.net/geojson?sensor=false&address=Vilnius+University
1 {
2 "results" : [
3 {
4 "address_components" : [
5 {
6 "long_name" : "Vilnius",
7 "short_name" : "Vilnius",
8 "types" : [ "locality", "political" ]
9 },
10 {
11 "long_name" : "Vilnius city municipality",
12 "short_name" : "Vilnius city municipality",
13 "types" : [ "administrative_area_level_2", "political" ]
14 },
15 {
16 "long_name" : "Vilnius County",
17 "short_name" : "Vilnius County",
18 "types" : [ "administrative_area_level_1", "political" ]
19 },
20 {
21 "long_name" : "Lithuania",
22 "short_name" : "LT",
23 "types" : [ "country", "political" ]
24 }
25 ],
26 "formatted_address" : "Vilnius, Lithuania",
27 "geometry" : {
28 "bounds" : {
29 "northeast" : {
30 "lat" : 54.832544,
31 "lng" : 25.4814883
32 },
33 "southwest" : {
34 "lat" : 54.567798,
35 "lng" : 25.024376
36 }
37 },
38 "location" : {
39 "lat" : 54.6871555,
40 "lng" : 25.2796514
41 },
42 "location_type" : "APPROXIMATE",
43 "viewport" : {
44 "northeast" : {
45 "lat" : 54.832544,
46 "lng" : 25.4814883
47 },
48 "southwest" : {
49 "lat" : 54.567798,
50 "lng" : 25.024376
51 }
52 }
53 },
54 "partial_match" : true,
55 "place_id" : "ChIJ9QhkXPuT3UYRQNzpcIzRAAQ",
56 "types" : [ "locality", "political" ]
57 }
58 ],
59 "status" : "OK"
60 }
1 import urllib
2 import json
3
4 url = 'http://python-data.dr-chuck.net/geojson?'
5
6 while True:
7 address = raw_input()
8 serviceUrl = url + urllib.urlencode({'sensor': 'false', 'address': address})
9 print serviceUrl
10 input = urllib.urlopen(serviceUrl).read()
11 js = json.loads(input)
12 print js['results'][0]['place_id']

http://python-data.dr-chuck.net/comments_194528.json
1 {
2 "note":"This file contains the actual data for your assignment",
3 "comments":[
4 {
5 "name":"Anastasija",
6 "count":100
7 },
8 {
9 "name":"Ridwan",
10 "count":98
11 },
12 {
13 "name":"Rory",
14 "count":88
15 },
16 {
17 "name":"Aeryn",
18 "count":88
19 },
20 {
21 "name":"Marcus",
22 "count":84
23 },
24 {
25 "name":"Jock",
26 "count":83
27 },
28 {
29 "name":"Talise",
30 "count":81
31 },
32 {
33 "name":"Rheanne",
34 "count":80
35 },
36 {
37 "name":"Wardah",
38 "count":78
39 },
40 {
41 "name":"Karyss",
42 "count":74
43 },
44 {
45 "name":"Mea",
46 "count":70
47 },
48 {
49 "name":"Maryk",
50 "count":69
51 },
52 {
53 "name":"Neshawn",
54 "count":69
55 },
56 {
57 "name":"Miyah",
58 "count":63
59 },
60 {
61 "name":"Janelle",
62 "count":63
63 },
64 {
65 "name":"Finnen",
66 "count":60
67 },
68 {
69 "name":"Michela",
70 "count":59
71 },
72 {
73 "name":"Yishuka",
74 "count":57
75 },
76 {
77 "name":"Kern",
78 "count":57
79 },
80 {
81 "name":"Tanner",
82 "count":57
83 },
84 {
85 "name":"Zhong",
86 "count":56
87 },
88 {
89 "name":"Merin",
90 "count":54
91 },
92 {
93 "name":"Jian",
94 "count":53
95 },
96 {
97 "name":"Khadijah",
98 "count":52
99 },
100 {
101 "name":"Lauren",
102 "count":49
103 },
104 {
105 "name":"Adam",
106 "count":47
107 },
108 {
109 "name":"Akan",
110 "count":43
111 },
112 {
113 "name":"Shauni",
114 "count":43
115 },
116 {
117 "name":"Klein",
118 "count":40
119 },
120 {
121 "name":"Brett",
122 "count":38
123 },
124 {
125 "name":"Ayat",
126 "count":37
127 },
128 {
129 "name":"Rayna",
130 "count":35
131 },
132 {
133 "name":"Duaa",
134 "count":34
135 },
136 {
137 "name":"Mariyah",
138 "count":32
139 },
140 {
141 "name":"Matthias",
142 "count":30
143 },
144 {
145 "name":"Olurotimi",
146 "count":26
147 },
148 {
149 "name":"Odynn",
150 "count":23
151 },
152 {
153 "name":"Shahna",
154 "count":21
155 },
156 {
157 "name":"Saphyre",
158 "count":20
159 },
160 {
161 "name":"Bowie",
162 "count":19
163 },
164 {
165 "name":"Keera",
166 "count":18
167 },
168 {
169 "name":"Ross",
170 "count":14
171 },
172 {
173 "name":"Caragh",
174 "count":13
175 },
176 {
177 "name":"Siena",
178 "count":12
179 },
180 {
181 "name":"Carley",
182 "count":9
183 },
184 {
185 "name":"Waqaas",
186 "count":9
187 },
188 {
189 "name":"Kira",
190 "count":9
191 },
192 {
193 "name":"Avinash",
194 "count":3
195 },
196 {
197 "name":"Alysia",
198 "count":1
199 },
200 {
201 "name":"Josephina",
202 "count":1
203 }
204 ]
205 }
1 import urllib 2 import json 3 4 url = 'http://python-data.dr-chuck.net/comments_194528.json' 5 6 input = urllib.urlopen(url).read() 7 js = json.loads(input) 8 sum = 0 9 for dict in js['comments']: 10 sum += int(dict['count']) 11 print sum
1 import socket
2 import time
3 mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
4 mysock.connect(('news.xinhuanet.com', 80))
5 mysock.send('GET http://news.xinhuanet.com/photo/2015-12/01/128485250_14489244150011n.jpg HTTP/1.0\n\n')
6 count = 0
7
8 picture = "";
9 while True:
10 data = mysock.recv(5120)
11 if ( len(data) < 1 ) : break
12 # time.sleep(0.25)
13 count = count + len(data)
14 print len(data),count
15 picture = picture + data
16 mysock.close()
17 # Look for the end of the header (2 CRLF)
18 pos = picture.find("\r\n\r\n")
19 print 'Header length',pos
20 print picture[:pos]
21 # Skip past the header and save the picture data
22 picture = picture[pos+4:]
23 fhand = open("stuff.jpg","wb")
24 fhand.write(picture)
25 fhand.close()
Source: https://www.cnblogs.com/wanderingzj/p/5010460.html