/*
 * Copyright (c) 1995 Danny Gasparovski.
 *
 * Please read the file COPYRIGHT for the
 * terms and conditions of the copyright.
 */

#include <slirp.h>

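/*
 * Interface parameters: MTU/MRU, the header-compression mode and the
 * amount of link-level header space reserved in front of each packet.
 */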
int if_mtu, if_mru;
int if_comp;
int if_maxlinkhdr;
int if_queued = 0;            /* Number of packets queued so far */
int if_thresh = 10;           /* Number of packets queued before we start sending
                               * (to prevent allocing too many mbufs) */

struct mbuf if_fastq;         /* fast queue (for interactive data) */
struct mbuf if_batchq;        /* queue for non-interactive data */
struct mbuf *next_m;          /* Pointer to next mbuf to output */

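/* Make ifm the sole member of its (circular) per-session packet list */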
#define ifs_init(ifm) ((ifm)->ifs_next = (ifm)->ifs_prev = (ifm))

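/* Insert ifm into a session list, directly after ifmhead */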
void
ifs_insque(struct mbuf *ifm, struct mbuf *ifmhead)
{
    ifm->ifs_next = ifmhead->ifs_next;
    ifmhead->ifs_next = ifm;
    ifm->ifs_prev = ifmhead;
    ifm->ifs_next->ifs_prev = ifm;
}

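/* Unlink ifm from its session list */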
void
ifs_remque(struct mbuf *ifm)
{
    ifm->ifs_prev->ifs_next = ifm->ifs_next;
    ifm->ifs_next->ifs_prev = ifm->ifs_prev;
}

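/* Initialise the interface parameters and the (empty, circular) output queues */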
void
if_init(void)
{
#if 0
    /*
     * Set if_maxlinkhdr to 48 because it's 40 bytes for TCP/IP,
     * and 8 bytes for PPP, but we need to have it on an 8-byte boundary
     */
#ifdef USE_PPP
    if_maxlinkhdr = 48;
#else
    if_maxlinkhdr = 40;
#endif
#else
    /* 2 for alignment, 14 for ethernet, 40 for TCP/IP */
    if_maxlinkhdr = 2 + 14 + 40;
#endif
    if_mtu = 1500;
    if_mru = 1500;
    if_comp = IF_AUTOCOMP;
    if_fastq.ifq_next = if_fastq.ifq_prev = &if_fastq;
    if_batchq.ifq_next = if_batchq.ifq_prev = &if_batchq;
    // sl_compress_init(&comp_s);
    next_m = &if_batchq;
}

#if 0
/*
 * This shouldn't be needed since the modem is blocking and
 * we don't expect any signals, but what the hell..
 */
inline int
writen(int fd, char *bptr, int n)
{
    int ret;
    int total;

    /* This should succeed most of the time */
    ret = send(fd, bptr, n, 0);
    if (ret == n || ret <= 0)
        return ret;

    /* Didn't write everything, go into the loop */
    total = ret;
    while (n > total) {
        ret = send(fd, bptr + total, n - total, 0);
        if (ret <= 0)
            return ret;
        total += ret;
    }
    return total;
}

/*
 * if_input - read() the tty, do "top level" processing (i.e. check for
 * any escapes), and pass it on to (*ttyp->if_input)
 *
 * XXXXX Any zeros arriving by themselves are NOT placed into the arriving packet.
 */
#define INBUFF_SIZE 2048 /* XXX */
void
if_input(struct ttys *ttyp)
{
    u_char if_inbuff[INBUFF_SIZE];
    int if_n;

    DEBUG_CALL("if_input");
    DEBUG_ARG("ttyp = %lx", (long)ttyp);

    if_n = recv(ttyp->fd, (char *)if_inbuff, INBUFF_SIZE, 0);

    DEBUG_MISC((dfd, " read %d bytes\n", if_n));

    if (if_n <= 0) {
        if (if_n == 0 || (errno != EINTR && errno != EAGAIN)) {
            if (ttyp->up)
                link_up--;
            tty_detached(ttyp, 0);
        }
        return;
    }
    if (if_n == 1) {
        if (*if_inbuff == '0') {
            ttyp->ones = 0;
            if (++ttyp->zeros >= 5)
                slirp_exit(0);
            return;
        }
        if (*if_inbuff == '1') {
            ttyp->zeros = 0;
            if (++ttyp->ones >= 5)
                tty_detached(ttyp, 0);
            return;
        }
    }
    ttyp->ones = ttyp->zeros = 0;

    (*ttyp->if_input)(ttyp, if_inbuff, if_n);
}
#endif

/*
 * if_output: Queue packet into an output queue.
 * There are two output queues, if_fastq and if_batchq.
 * Each output queue is a doubly linked list of doubly linked lists
 * of mbufs, each list belonging to one "session" (socket).  This
 * way, we can output packets fairly by sending one packet from each
 * session, instead of all the packets from one session, then all packets
 * from the next session, etc.  Packets on the if_fastq get absolute
 * priority, but if one session hogs the link, it gets "downgraded"
 * to the batchq until it runs out of packets, then it'll return
 * to the fastq (e.g. if the user does an ls -alR in a telnet session,
 * it'll temporarily get downgraded to the batchq)
 */
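/*
 * A sketch of one possible state of the structure described above
 * (two sessions, A and B, queued on if_batchq):
 *
 *   if_batchq <--ifq--> [A, pkt 1] <--ifq--> [B, pkt 1] <--ifq--> if_batchq
 *                           |                    |
 *                          ifs                  ifs
 *                           |                    |
 *                       [A, pkt 2]           [B, pkt 2]
 *                           |
 *                          ifs
 *                           |
 *                       [A, pkt 3]
 *
 * The ifq_next/ifq_prev links chain one mbuf per session onto the queue
 * itself; the ifs_next/ifs_prev links form a circular ring holding the
 * rest of that session's packets behind it.
 */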
void
if_output(struct socket *so, struct mbuf *ifm)
{
    struct mbuf *ifq;
    int on_fastq = 1;

    DEBUG_CALL("if_output");
    DEBUG_ARG("so = %lx", (long)so);
    DEBUG_ARG("ifm = %lx", (long)ifm);

    /*
     * First remove the mbuf from m_usedlist,
     * since we're gonna use m_next and m_prev ourselves
     * XXX Shouldn't need this, gotta change dtom() etc.
     */
    if (ifm->m_flags & M_USEDLIST) {
        remque(ifm);
        ifm->m_flags &= ~M_USEDLIST;
    }

    /*
     * See if there's already a batchq list for this session.
     * This can include an interactive session, which should go on fastq,
     * but gets too greedy... hence it'll be downgraded from fastq to batchq.
     * We mustn't put this packet back on the fastq (or we'll send it out of order)
     * XXX add cache here?
     */
    for (ifq = if_batchq.ifq_prev; ifq != &if_batchq; ifq = ifq->ifq_prev) {
        if (so == ifq->ifq_so) {
            /* A match! */
            ifm->ifq_so = so;
            ifs_insque(ifm, ifq->ifs_prev);
            goto diddit;
        }
    }

    /* No match, check which queue to put it on */
    if (so && (so->so_iptos & IPTOS_LOWDELAY)) {
        ifq = if_fastq.ifq_prev;
        on_fastq = 1;
        /*
         * Check if this packet is a part of the last
         * packet's session
         */
        if (ifq->ifq_so == so) {
            ifm->ifq_so = so;
            ifs_insque(ifm, ifq->ifs_prev);
            goto diddit;
        }
    } else
        ifq = if_batchq.ifq_prev;

    /* Create a new doubly linked list for this session */
    ifm->ifq_so = so;
    ifs_init(ifm);
    insque(ifm, ifq);

diddit:
    ++if_queued;

    if (so) {
        /* Update *_queued */
        so->so_queued++;
        so->so_nqueued++;
        /*
         * Check if the interactive session should be downgraded to
         * the batchq.  A session is downgraded if it has queued 6
         * packets without pausing, and at least 3 of those packets
         * have been sent over the link
         * (XXX These are arbitrary numbers, probably not optimal..)
         */
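        /*
         * ifm->ifs_next is the mbuf of this session that sits on the ifq
         * chain itself (for a brand-new session that is ifm); moving it
         * drags the whole session, ifs ring and all, over to the batchq.
         */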
        if (on_fastq && ((so->so_nqueued >= 6) &&
                         (so->so_nqueued - so->so_queued) >= 3)) {

            /* Remove from current queue... */
            remque(ifm->ifs_next);

            /* ...And insert in the new.  That'll teach ya! */
            insque(ifm->ifs_next, &if_batchq);
        }
    }

#ifndef FULL_BOLT
    /*
     * This prevents us from malloc()ing too many mbufs
     */
    if (link_up) {
        /* if_start will check towrite */
        if_start();
    }
#endif
}

/*
 * Send a packet
 * We choose a packet based on its position in the output queues;
 * If there are packets on the fastq, they are sent FIFO, before
 * everything else.  Otherwise we choose the first packet from the
 * batchq and send it.  The next packet chosen will be from the session
 * after this one, then the session after that one, and so on..  So,
 * for example, if there are 3 ftp sessions fighting for bandwidth,
 * one packet will be sent from the first session, then one packet
 * from the second session, then one packet from the third, then back
 * to the first, etc. etc.
 */
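/*
 * next_m remembers where the previous pass stopped in if_batchq, so the
 * round-robin described above resumes with the following session on the
 * next call.
 */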
void
if_start(void)
{
    struct mbuf *ifm, *ifqt;

    DEBUG_CALL("if_start");

    if (if_queued == 0)
        return; /* Nothing to do */

again:
    /* check if we can really output */
    if (!slirp_can_output())
        return;

    /*
     * See which queue to get next packet from
     * If there's something in the fastq, select it immediately
     */
    if (if_fastq.ifq_next != &if_fastq) {
        ifm = if_fastq.ifq_next;
    } else {
        /* Nothing on fastq, see if next_m is valid */
        if (next_m != &if_batchq)
            ifm = next_m;
        else
            ifm = if_batchq.ifq_next;

        /* Set which packet to send on next iteration */
        next_m = ifm->ifq_next;
    }
    /* Remove it from the queue */
    ifqt = ifm->ifq_prev;
    remque(ifm);
    --if_queued;

    /* If there are more packets for this session, re-queue them */
    if (ifm->ifs_next != /* ifm->ifs_prev != */ ifm) {
        insque(ifm->ifs_next, ifqt);
        ifs_remque(ifm);
    }
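    /*
     * (The insque() above re-linked the session's next packet at the slot
     * the departing head occupied, so the session keeps its place in the
     * round-robin order.)
     */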

    /* Update so_queued */
    if (ifm->ifq_so) {
        if (--ifm->ifq_so->so_queued == 0)
            /* If there are no more queued, reset nqueued */
            ifm->ifq_so->so_nqueued = 0;
    }

    /* Encapsulate the packet for sending */
    if_encap((uint8_t *)ifm->m_data, ifm->m_len);

    m_free(ifm);

    if (if_queued)
        goto again;
}